text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
# Some globals:
debug = False
warnings = []
def getOptions():
    """Return the user-option description Avogadro shows for Gaussian jobs.

    The result has a single 'userOptions' key mapping each option name to a
    dict describing its widget type, default value, and (where applicable)
    allowed values or numeric range.
    """
    userOptions = {
        'Title': {
            'type': 'string',
            'default': '',
        },
        'Calculation Type': {
            'type': 'stringList',
            'default': 1,
            'values': ['Single Point', 'Equilibrium Geometry', 'Frequencies'],
        },
        'Theory': {
            'type': 'stringList',
            'default': 3,
            'values': ['AM1', 'PM3', 'RHF', 'B3LYP', 'MP2', 'CCSD'],
        },
        'Basis': {
            'type': 'stringList',
            'default': 2,
            'values': ['STO-3G', '3-21 G', '6-31 G(d)', '6-31 G(d,p)',
                       'LANL2DZ'],
        },
        'Filename Base': {
            'type': 'string',
            'default': 'job',
        },
        'Processor Cores': {
            'type': 'integer',
            'default': 1,
            'minimum': 1,
        },
        'Multiplicity': {
            'type': 'integer',
            'default': 1,
            'minimum': 1,
            'maximum': 5,
        },
        'Charge': {
            'type': 'integer',
            'default': 0,
            'minimum': -9,
            'maximum': 9,
        },
        'Output Format': {
            'type': 'stringList',
            'default': 0,
            'values': ['Standard', 'Molden', 'Molekel'],
        },
        'Write Checkpoint File': {
            'type': 'boolean',
            'default': True,
        },
    }
    # TODO Coordinate format (need zmatrix)
    return {'userOptions': userOptions}
def generateInputFile(opts):
    """Build the text of a Gaussian input (.com) file from the options dict.

    `opts` is the 'options' dict delivered by Avogadro (see getOptions()).
    Raises Exception for an unrecognized calculation type or output format.
    Appends to the module-level `warnings` list when the basis set must be
    ignored for a semi-empirical theory.
    """
    title = opts['Title']
    calculate = opts['Calculation Type']
    theory = opts['Theory']
    basis = opts['Basis']
    multiplicity = opts['Multiplicity']
    charge = opts['Charge']
    outputFormat = opts['Output Format']
    checkpoint = opts['Write Checkpoint File']
    nCores = int(opts['Processor Cores'])

    parts = []
    # Link 0 section: core count and checkpoint file, when requested.
    if nCores > 1:
        parts.append('%%NProcShared=%d\n' % nCores)
    if checkpoint:
        parts.append('%Chk=checkpoint.chk\n')
    # Route section: theory (and basis, except for semi-empirical methods).
    if theory in ('AM1', 'PM3'):
        parts.append('#n %s' % theory)
        warnings.append('Ignoring basis set for semi-empirical calculation.')
    else:
        parts.append('#n %s/%s' % (theory, basis.replace(' ', '')))
    # Calculation-type keyword.
    calcKeyword = {'Single Point': ' SP',
                   'Equilibrium Geometry': ' Opt',
                   'Frequencies': ' Opt Freq'}
    if calculate not in calcKeyword:
        raise Exception('Invalid calculation type: %s' % calculate)
    parts.append(calcKeyword[calculate])
    # Extra route keywords for the requested output format.
    formatKeyword = {'Standard': '',
                     'Molden': ' gfprint pop=full',
                     'Molekel': ' gfoldprint pop=full'}
    if outputFormat not in formatKeyword:
        raise Exception('Invalid output format: %s' % outputFormat)
    parts.append(formatKeyword[outputFormat])
    # Title, charge/multiplicity, and the coordinates placeholder MoleQueue
    # substitutes at submission time.
    parts.append('\n\n %s\n\n' % title)
    parts.append('%d %d\n' % (charge, multiplicity))
    parts.append('$$coords:Sxyz$$\n')
    # The gaussian code is irritatingly fickle -- it *will* silently crash if
    # this extra, otherwise unnecessary newline is not present at the end of
    # the file.
    parts.append('\n')
    return ''.join(parts)
def generateInput():
    """Read the Avogadro options JSON from stdin and build the result dict.

    Returns a dict with 'files' (list of {filename, contents}), 'mainFile',
    and, when the module-level `warnings` list is non-empty, 'warnings'.
    """
    # Read and parse the JSON options delivered on stdin.
    stdinStr = sys.stdin.read()
    opts = json.loads(stdinStr)

    # Generate the input file text and derive the filename from the
    # user-chosen base name.
    inp = generateInputFile(opts['options'])
    baseName = opts['options']['Filename Base']
    mainFileName = '%s.com' % baseName

    # Files appear in the GUI in the order they are listed here.
    files = [{'filename': mainFileName, 'contents': inp}]
    if debug:
        files.append({'filename': 'debug_info', 'contents': stdinStr})

    result = {'files': files}
    # MoleQueue uses mainFile to resolve the $$inputFileName$$ and
    # $$inputFileBaseName$$ keywords.
    result['mainFile'] = mainFileName
    if warnings:
        result['warnings'] = warnings
    return result
if __name__ == "__main__":
    # Command-line entry point used by Avogadro/MoleQueue. The flags select
    # one of three modes: print the option description, generate an input
    # file from JSON on stdin, or print the generator's display name.
    parser = argparse.ArgumentParser('Generate a Gaussian input file.')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--print-options', action='store_true')
    parser.add_argument('--generate-input', action='store_true')
    parser.add_argument('--display-name', action='store_true')
    args = vars(parser.parse_args())

    # --debug adds a debug_info file to generateInput()'s output.
    debug = args['debug']

    if args['display_name']:
        print("Gaussian")
    if args['print_options']:
        print(json.dumps(getOptions()))
    elif args['generate_input']:
        print(json.dumps(generateInput()))
|
wadejong/avogadrolibs
|
avogadro/qtplugins/quantuminput/inputGenerators/gaussian.py
|
Python
|
bsd-3-clause
| 5,874
|
[
"Avogadro",
"Gaussian"
] |
e45b384a7ab58081b40303f6d0659c100f61a4e74949c6888760dfd20eea7684
|
# Online haptic_map implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import time
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
#from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/')
from data_variable_length_force_sample import Fmat_original, temp_num_fol, temp_num_trunk
def callback(data, callback_args):
    # ROS subscriber callback for an incoming TaxelArray force message.
    #
    # Per message: (1) look up the transform from the taxel frame into
    # /torso_lift_link, (2) threshold per-taxel force magnitudes and
    # grow/reset each taxel's force history in the global `fmags`,
    # (3) transform contact centers into the torso frame, (4) classify
    # contacting taxels via test_data() and publish a marker per contact.
    global start_time
    start_time = time.time()
    # Fixing Transforms
    tf_lstnr = callback_args
    sc = SkinContact()
    sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
    sc.header.stamp = data.header.stamp
    t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
                                      data.header.frame_id,
                                      rospy.Time(0))
    t1 = np.matrix(t1).reshape(3,1)
    r1 = tr.quaternion_to_matrix(q1)
    # Gathering Force Data
    force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
    fmags_instant = ut.norm(force_vectors)
    # Hand-tuned force threshold -- presumably Newtons; TODO confirm units.
    threshold = 0.01
    fmags_tuned = fmags_instant - threshold
    fmags_tuned[np.where(fmags_tuned<0)]=0
    fmags_instant_tuned = fmags_tuned
    global fmags
    # Append to the running force history of taxels still in contact; reset
    # the history of any taxel that dropped below the threshold.
    for i in range(len(fmags_instant_tuned)):
        if fmags_instant_tuned[i] > 0.0:
            fmags[i].append(fmags_instant_tuned[i])
        else:
            fmags[i] = []
    # Calculating no. of contact regions with hand-tuned force threshold
    global total_contact
    total_contact = sum(i > 0 for i in fmags_instant_tuned)
    # Gathering Contact Data for Haptic Mapping: rotate + translate each
    # contact center into the torso frame.
    global global_contact_vector
    for i in range(len(fmags_instant_tuned)):
        global_contact_vector[i] = r1*((np.column_stack([data.centers_x[i], data.centers_y[i], data.centers_z[i]])).T) + t1
    test_data()
    global taxel_FLAG
    # Publish one marker per taxel that test_data() flagged as in contact.
    for i in range(len(fmags_instant_tuned)):
        if taxel_FLAG[i] > -1:
            idx = taxel_FLAG[i]
            contact_info = global_contact_vector[i]
            pubdata(idx, contact_info)
def test_data():
    # Classify each taxel's accumulated force history by comparing the
    # Viterbi log-likelihoods of the two trained HMMs (foliage vs trunk),
    # set taxel_FLAG accordingly, and append one row of per-frame contact
    # statistics to the global `quantitative_data`.
    global exp_time
    # Do Stuff For Testing which basically returns which FLAG is true
    global taxel_FLAG # -1 for not in Contact, 0 for Unknown (Red), 1 for Foliage (green), 2 for Trunk (brown)
    global trunk_contact
    trunk_contact = 0
    global foliage_contact
    foliage_contact = 0
    global outlier_contact
    outlier_contact = 0
    global total_contact
    global uncertain_contact
    uncertain_contact = 0
    num_samples = []
    # For Testing
    global start_time
    global fmags
    # 384 is the fixed taxel count of this skin patch (same constant used
    # when the fmags/taxel_FLAG dicts are initialized in __main__).
    for i in range(384):
        if (len(fmags[i]) > 0):
            ts_obj = fmags[i]
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
            # Find Viterbi Path
            global model_ff
            global model_tf
            path_ff_obj = model_ff.viterbi(final_ts_obj)
            path_tf_obj = model_tf.viterbi(final_ts_obj)
            print path_ff_obj[1], path_tf_obj[1]
            # The margins 80 and 15 below are hand-tuned decision
            # thresholds on the log-likelihood difference -- TODO confirm.
            diff = abs(path_ff_obj[1]-path_tf_obj[1])
            obj = max(path_ff_obj[1],path_tf_obj[1])
            obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
            if ((obj == path_ff_obj[1]) and (diff > 80)):
                #if ((obj == path_ff_obj[1])):
                #print 'Taxel', i, 'is Foliage !'
                taxel_FLAG[i] = 1
                foliage_contact = foliage_contact+1
                num_samples.append(len(fmags[i]))
            elif ((obj == path_tf_obj[1]) and (diff > 80)):
                #elif ((obj == path_tf_obj[1])):
                #print 'Taxel', i, 'is Trunk !'
                taxel_FLAG[i] = 2
                trunk_contact = trunk_contact+1
                num_samples.append(len(fmags[i]))
            elif ((diff < 80) and (diff > 15)):
                #print 'Taxel', i, 'is Outlier'
                taxel_FLAG[i] = 3
                outlier_contact = outlier_contact+1
            else:
                taxel_FLAG[i] = 0
                #print 'Taxel', i, 'is Uncertain'
                uncertain_contact = uncertain_contact+1
        else:
            #print 'Taxel', i, 'is not in Contact'
            taxel_FLAG[i] = -1
    time_taken = time.time()-start_time
    global quantitative_data
    # exp_time advances by a fixed 10 ms per processed frame, not wall-clock.
    exp_time = exp_time+0.01
    quant_instant_data = [exp_time, total_contact, trunk_contact, foliage_contact, outlier_contact, uncertain_contact]
    quantitative_data = np.row_stack([quantitative_data, quant_instant_data])
def getdata():
    # Initialize the ROS node, subscribe to the right-forearm skin taxel
    # force topic (messages handled by callback()), then block in
    # rospy.spin() until shutdown.
    rospy.loginfo('Initializing the Node !')
    rospy.init_node('Online_Haptic_Map_Builder', anonymous=True)
    tf_lstnr = tf.TransformListener()
    rospy.loginfo('Waiting to Subscribe to the Skin Message...')
    rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray, callback, callback_args = (tf_lstnr))
    rospy.spin()
def pubdata(idx, contact_info):
    # Append a sphere marker at contact_info (x, y, z in /torso_lift_link),
    # coloured by classification idx, then republish the whole MarkerArray.
    marker = Marker()
    marker.ns = 'Haptic_Map_Markers'
    marker.header.frame_id = '/torso_lift_link'
    marker.type = marker.SPHERE
    marker.action = marker.ADD
    marker.scale.x = 0.02
    marker.scale.y = 0.02
    marker.scale.z = 0.02
    # (a, r, g, b) per class: 1 = foliage (green), 2 = trunk (brown),
    # 3 = outlier (red). Anything else is uncertain contact and is drawn
    # fully transparent so it does not show.
    color_table = {1: (1.0, 0.0, 1.0, 0.0),
                   2: (1.0, 0.5, 0.25, 0.125),
                   3: (1.0, 1.0, 0.0, 0.0)}
    alpha, red, green, blue = color_table.get(idx, (0.0, 0.0, 0.0, 0.0))
    marker.color.a = alpha
    marker.color.r = red
    marker.color.g = green
    marker.color.b = blue
    marker.pose.orientation.w = 1.0
    marker.pose.position.x = contact_info[0]
    marker.pose.position.y = contact_info[1]
    marker.pose.position.z = contact_info[2]
    markerArray.markers.append(marker)
    # Renumber the marker IDs so every marker in the array stays unique.
    for marker_id, existing in enumerate(markerArray.markers):
        existing.id = marker_id
    # Publish the MarkerArray
    publisher.publish(markerArray)
if __name__ == '__main__':
    # Entry point: train one HMM per surface class (foliage, trunk) from
    # pre-recorded force sequences, then go online and classify live skin
    # contacts, publishing RViz markers and saving per-frame statistics.
    topic = 'visualization_marker_array'
    publisher = rospy.Publisher(topic, MarkerArray)
    markerArray = MarkerArray()
    print "Initializing the HMM Models"
    # HMM Implementation
    Fmat = Fmat_original
    Foliage_Trials = temp_num_fol
    Trunk_Trials = temp_num_trunk
    # Getting mean / covariance: split each foliage trial into
    # `number_states` equal chunks and pool the samples per chunk.
    i = 0
    number_states = 10
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < Foliage_Trials):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        # NOTE(review): integer division (Python 2) -- trailing samples
        # beyond sample_length*number_states are dropped.
        sample_length = feature_length/number_states
        Feature_1 = Fmat[i][0:feature_length]
        if i == 0:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                #print np.shape(state_1)
                #print np.shape(feature_1_final_data[j])
                # list concatenation: pool this trial's chunk with the rest
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1
    # Per-state Gaussian parameters for the foliage (ff) model.
    j = 0
    mu_ff_force = np.zeros((number_states,1))
    sigma_ff = np.zeros((number_states,1))
    while (j < number_states):
        mu_ff_force[j] = np.mean(feature_1_final_data[j])
        sigma_ff[j] = scp.std(feature_1_final_data[j])
        j = j+1
    # Repeat the pooling for the trunk trials (indices after the foliage ones).
    i = Foliage_Trials
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < (Foliage_Trials + Trunk_Trials)):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        sample_length = feature_length/number_states
        Feature_1 = Fmat[i][0:feature_length]
        if i == Foliage_Trials:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1
    # Per-state Gaussian parameters for the trunk (tf) model.
    j = 0
    mu_tf_force = np.zeros((number_states,1))
    sigma_tf = np.zeros((number_states,1))
    while (j < number_states):
        mu_tf_force[j] = np.mean(feature_1_final_data[j])
        sigma_tf[j] = scp.std(feature_1_final_data[j])
        j = j+1
    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    F = ghmm.Float() # emission domain of this model
    # A - Transition Matrix (left-to-right / upper-triangular; only the
    # branch matching number_states is used -- 10 here).
    if number_states == 3:
        A = [[0.2, 0.5, 0.3],
             [0.0, 0.5, 0.5],
             [0.0, 0.0, 1.0]]
    elif number_states == 5:
        A = [[0.2, 0.35, 0.2, 0.15, 0.1],
             [0.0, 0.2, 0.45, 0.25, 0.1],
             [0.0, 0.0, 0.2, 0.55, 0.25],
             [0.0, 0.0, 0.0, 0.2, 0.8],
             [0.0, 0.0, 0.0, 0.0, 1.0]]
    elif number_states == 10:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    elif number_states == 15:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
    elif number_states == 20:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_ff = [0.0]*number_states
    B_tf = [0.0]*number_states
    for num_states in range(number_states):
        B_ff[num_states] = [mu_ff_force[num_states][0],sigma_ff[num_states][0]]
        B_tf[num_states] = [mu_tf_force[num_states][0],sigma_tf[num_states][0]]
    # pi - initial probabilities per state (uniform in every case)
    if number_states == 3:
        pi = [1./3.] * 3
    elif number_states == 5:
        pi = [0.2] * 5
    elif number_states == 10:
        pi = [0.1] * 10
    elif number_states == 15:
        pi = [1./15.] * 15
    elif number_states == 20:
        pi = [0.05] * 20
    # generate FF, TF models from parameters
    model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
    model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained
    # Flatten each trial's nested lists into one observation sequence.
    # NOTE(review): this mutates Fmat in place (total_seq aliases Fmat).
    total_seq = Fmat
    for i in range((Foliage_Trials + Trunk_Trials)):
        total_seq[i][:] = sum(total_seq[i][:],[])
    total_seq_ff = total_seq[0:Foliage_Trials]
    total_seq_tf = total_seq[Foliage_Trials:Foliage_Trials + Trunk_Trials]
    #print len(total_seq_ff)
    #print len(total_seq_tf)
    print "Training the HMM Models..."
    train_seq_ff = total_seq_ff
    train_seq_tf = total_seq_tf
    final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
    final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
    model_ff.baumWelch(final_ts_ff)
    model_tf.baumWelch(final_ts_tf)
    print "Models Trained: Ready to Collect Data !"
    # Gather Data from Robot Online: per-taxel state for the 384 taxels.
    taxel_FLAG = {}
    for i in range(384):
        taxel_FLAG[i] = -1 #-1 for not in Contact,0 for Uncertain (No Colour),1 for Foliage (green),2 for Trunk (brown),3 for Outlier (red)
    fmags = {}
    for i in range(384):
        fmags[i] = []
    global_contact_vector = {}
    for i in range(384):
        global_contact_vector[i] = []
    FLAG_Trunk = False
    FLAG_Foliage = False
    FLAG_Unknown = True
    total_contact = 0
    trunk_contact = 0
    foliage_contact = 0
    outlier_contact = 0
    uncertain_contact = 0
    start_time = 0.0
    exp_time = 0.0
    quantitative_data = [0,0,0,0,0,0]
    # Blocks in rospy.spin(); statistics are saved once the node shuts down.
    getdata()
    ut.save_pickle(quantitative_data, '/home/tapo/svn/robot1_data/usr/tapo/data/rapid_categorization/Taxel_Based/Tests/test4.pkl')
|
tapomayukh/projects_in_python
|
rapid_categorization/haptic_map/outlier/online_map_outlier_no_model.py
|
Python
|
mit
| 17,767
|
[
"Gaussian",
"Mayavi"
] |
40cf38b4aaabb971550d9d02439c406356181306e19861bbf4bb1e23dab8c7fc
|
# Orca
#
# Copyright 2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Superclass of classes used to generate presentations for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2009 Sun Microsystems Inc."
__license__ = "LGPL"
import sys
import time
import traceback
import pyatspi
from . import braille
from . import debug
from . import messages
from . import object_properties
from . import settings
import collections
def _formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.args
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]
# The prefix to use for the individual generator methods
#
METHOD_PREFIX = "_generate"
class Generator:
"""Takes accessible objects and generates a presentation for those
objects. See the generate method, which is the primary entry
point."""
# pylint: disable-msg=W0142
def __init__(self, script, mode):
    """Initialize the generator.

    Arguments:
    - script: the script this generator presents objects for; must expose
      a `formatting` mapping and utility helpers.
    - mode: the formatting mode key (e.g. speech or braille) used to look
      up formatting strings in script.formatting.
    """
    # pylint: disable-msg=W0108
    self._mode = mode
    self._script = script
    self._methodsDict = {}
    # Build a dispatch table of every callable _generate* method, keyed by
    # the name with the METHOD_PREFIX stripped and the first letter
    # lowercased (e.g. _generateRoleName -> "roleName"). The formatting
    # strings refer to these keys.
    for method in \
        [z for z in [getattr(self, y).__get__(self, self.__class__) for y in [x for x in dir(self) if x.startswith(METHOD_PREFIX)]] if isinstance(z, collections.Callable)]:
        name = method.__name__[len(METHOD_PREFIX):]
        name = name[0].lower() + name[1:]
        self._methodsDict[name] = method
    # Sanity-check the formatting strings against the dispatch table.
    self._verifyFormatting()
def _addGlobals(self, globalsDict):
    """Seed globalsDict with the extra names that formatting strings may
    reference when evaluated: obj, role (both None until set by the
    caller) and the pyatspi module.
    """
    globalsDict.update({'obj': None, 'role': None, 'pyatspi': pyatspi})
def _verifyFormatting(self):
    # Verify the formatting strings are OK. This is only
    # for verification and does not effect the function of
    # Orca at all.
    #
    # Strategy: eval() each formatting string with every known generator
    # name bound to an empty list; any NameError for a name that is NOT a
    # generator method indicates a bad formatting string and is logged.
    #
    # Populate the entire globals with empty arrays
    # for the results of all the legal method names.
    globalsDict = {}
    for key in list(self._methodsDict.keys()):
        globalsDict[key] = []
    self._addGlobals(globalsDict)
    for roleKey in self._script.formatting[self._mode]:
        for key in ["focused", "unfocused"]:
            try:
                evalString = \
                    self._script.formatting[self._mode][roleKey][key]
            except:
                # Not every role defines both focused and unfocused.
                continue
            else:
                if not evalString:
                    # It's legal to have an empty string.
                    #
                    continue
                # Retry the eval, defining each missing name as it is
                # reported, until the string evaluates or fails hard.
                while True:
                    try:
                        eval(evalString, globalsDict)
                        break
                    except NameError:
                        info = _formatExceptionInfo()
                        # Extract the undefined name from the NameError
                        # message text.
                        arg = info[1][0]
                        arg = arg.replace("name '", "")
                        arg = arg.replace("' is not defined", "")
                        if arg not in self._methodsDict:
                            debug.printException(debug.LEVEL_SEVERE)
                        globalsDict[arg] = []
                    except:
                        debug.printException(debug.LEVEL_SEVERE)
                        break
def _overrideRole(self, newRole, args):
"""Convenience method to allow you to temporarily override the role in
the args dictionary. This changes the role in args ags
returns the old role so you can pass it back to _restoreRole.
"""
oldRole = args.get('role', None)
args['role'] = newRole
return oldRole
def _restoreRole(self, oldRole, args):
"""Convenience method to restore the old role back in the args
dictionary. The oldRole should have been obtained from
_overrideRole. If oldRole is None, then the 'role' key/value
pair will be deleted from args.
"""
if oldRole:
args['role'] = oldRole
else:
del args['role']
def generateContents(self, contents, **args):
    """Returns an empty array; subclasses override this to generate a
    presentation for a collection of contents."""
    return []
def generate(self, obj, **args):
    """Returns an array of strings (and possibly voice and audio
    specifications) that represent the complete presentatin for the
    object. The presentatin to be generated depends highly upon the
    formatting strings in formatting.py.

    args is a dictionary that may contain any of the following:
    - alreadyFocused: if True, we're getting an object
      that previously had focus
    - priorObj: if set, represents the object that had focus before
      this object
    - includeContext: boolean (default=True) which says whether
      the context for an object should be included as a prefix
      and suffix
    - role: a role to override the object's role
    - formatType: the type of formatting, such as
      'focused', 'basicWhereAmI', etc.
    - forceMnemonic: boolean (default=False) which says if we
      should ignore the settings.enableMnemonicSpeaking setting
    - forceTutorial: boolean (default=False) which says if we
      should force a tutorial to be spoken or not
    """
    startTime = time.time()
    result = []
    globalsDict = {}
    self._addGlobals(globalsDict)
    globalsDict['obj'] = obj
    try:
        globalsDict['role'] = args.get('role', obj.getRole())
    except:
        # The accessible may be dead/defunct; give up gracefully.
        msg = 'ERROR: Cannot generate presentation for: %s. Aborting' % obj
        debug.println(debug.LEVEL_INFO, msg, True)
        return result
    try:
        # We sometimes want to override the role.  We'll keep the
        # role in the args dictionary as a means to let us do so.
        #
        args['role'] = globalsDict['role']

        # We loop through the format string, catching each error
        # as we go.  Each error should always be a NameError,
        # where the name is the name of one of our generator
        # functions.  When we encounter this, we call the function
        # and get its results, placing them in the globals for the
        # the call to eval.
        #
        args['mode'] = self._mode
        if not args.get('formatType', None):
            if args.get('alreadyFocused', False):
                args['formatType'] = 'focused'
            else:
                args['formatType'] = 'unfocused'

        formatting = self._script.formatting.getFormat(**args)

        # Add in the context if this is the first time
        # we've been called.
        #
        if not args.get('recursing', False):
            if args.get('includeContext', True):
                prefix = self._script.formatting.getPrefix(**args)
                suffix = self._script.formatting.getSuffix(**args)
                formatting = '%s + %s + %s' % (prefix, formatting, suffix)
            args['recursing'] = True
            firstTimeCalled = True
        else:
            firstTimeCalled = False

        msg = '%s GENERATOR: Starting generation for %s' % (self._mode.upper(), obj)
        debug.println(debug.LEVEL_INFO, msg, True)
        assert(formatting)

        # Evaluate the formatting string, lazily resolving each generator
        # name as eval() reports it undefined via NameError.
        while True:
            currentTime = time.time()
            try:
                result = eval(formatting, globalsDict)
                break
            except NameError:
                result = []
                info = _formatExceptionInfo()
                # Pull the undefined name out of the NameError message.
                arg = info[1][0]
                arg = arg.replace("name '", "")
                arg = arg.replace("' is not defined", "")
                if arg not in self._methodsDict:
                    # Not one of ours: a genuinely bad formatting string.
                    debug.printException(debug.LEVEL_SEVERE)
                    break
                globalsDict[arg] = self._methodsDict[arg](obj, **args)
                duration = "%.4f" % (time.time() - currentTime)
                debug.println(debug.LEVEL_ALL,
                              " GENERATION TIME: %s ----> %s=%s" \
                              % (duration, arg, repr(globalsDict[arg])))
    except:
        debug.printException(debug.LEVEL_SEVERE)
        result = []

    duration = "%.4f" % (time.time() - startTime)
    debug.println(debug.LEVEL_ALL, " COMPLETION TIME: %s" % duration)
    debug.println(debug.LEVEL_ALL, "%s GENERATOR: Results:" % self._mode.upper(), True)
    for element in result:
        debug.println(debug.LEVEL_ALL, " %s" % element)
    return result
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateRoleName(self, obj, **args):
    """Returns the role name for the object in an array of strings, with
    the exception that the pyatspi.ROLE_UNKNOWN role will yield an
    empty array. Note that a 'role' attribute in args will
    override the accessible role of the obj.
    """
    # Subclasses must override this.
    return []
def _generateName(self, obj, **args):
    """Returns an array of strings for use by speech and braille that
    represent the name of the object. If the object is directly
    displaying any text, that text will be treated as the name.
    Otherwise, the accessible name of the object will be used. If
    there is no accessible name, then the description of the
    object will be used. This method will return an empty array
    if nothing can be found. [[[WDW - I wonder if we should just
    have _generateName, _generateDescription,
    _generateDisplayedText, etc., that don't do any fallback.
    Then, we can allow the formatting to do the fallback (e.g.,
    'displayedText or name or description'). [[[JD to WDW - I
    needed a _generateDescription for whereAmI. :-) See below.
    """
    result = []
    name = self._script.utilities.displayedText(obj)
    # Combo boxes: prefer the displayed text of the selected child(ren).
    if obj.getRole() == pyatspi.ROLE_COMBO_BOX:
        children = self._script.utilities.selectedChildren(obj)
        if not children and obj.childCount:
            children = self._script.utilities.selectedChildren(obj[0])
        children = children or [child for child in obj]
        names = map(self._script.utilities.displayedText, children)
        names = list(filter(lambda x: x, names))
        if len(names) == 1:
            name = names[0].strip()
        elif len(children) == 1 and children[0].name:
            name = children[0].name.strip()
        elif not names and obj.name:
            name = obj.name
    if name:
        result.append(name)
    else:
        # Fall back on the description, then on a link basename.
        try:
            description = obj.description
        except (LookupError, RuntimeError):
            return result
        if description:
            result.append(description)
        else:
            link = None
            if obj.getRole() == pyatspi.ROLE_LINK:
                link = obj
            elif obj.parent and obj.parent.getRole() == pyatspi.ROLE_LINK:
                link = obj.parent
            if link:
                basename = self._script.utilities.linkBasename(link)
                if basename:
                    result.append(basename)
    # To make the unlabeled icons in gnome-panel more accessible.
    try:
        role = args.get('role', obj.getRole())
    except (LookupError, RuntimeError):
        return result
    # NOTE(review): `role` is fetched above but the check below uses
    # obj.getRole(), so a role override in args is ignored here --
    # possibly intentional, verify against formatting.py usage.
    if not result and obj.getRole() == pyatspi.ROLE_ICON \
       and obj.parent.getRole() == pyatspi.ROLE_PANEL:
        return self._generateName(obj.parent)
    return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
result = [x for x in obj.getAttributes() if x.startswith('placeholder-text:')]
return [x.replace('placeholder-text:', '') for x in result]
def _generateLabelAndName(self, obj, **args):
    """Returns the label and the name as an array of strings for speech
    and braille. The name will only be present if the name is
    different from the label.
    """
    label = self._generateLabel(obj, **args)
    name = self._generateName(obj, **args)
    result = list(label)
    if not label:
        result.extend(name)
    elif name and name[0].strip() != label[0].strip():
        result.extend(name)
    return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
result = []
if obj.description:
label = self._script.utilities.displayedLabel(obj) or ""
name = obj.name or ""
desc = obj.description.lower()
if not (desc in name.lower() or desc in label.lower()):
result.append(obj.description)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if label:
result.append(label)
return result
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImageDescription(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represent the description of the image on the object, if it
exists. Otherwise, an empty array is returned.
"""
result = []
try:
image = obj.queryImage()
except NotImplementedError:
pass
else:
description = image.imageDescription
if description and len(description):
result.append(description)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateClickable(self, obj, **args):
return []
def _generateHasLongDesc(self, obj, **args):
return []
def _generateAvailability(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the grayed/sensitivity/availability state of the
object, but only if it is insensitive (i.e., grayed out and
inactive). Otherwise, and empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'insensitive'
if not obj.getState().contains(pyatspi.STATE_SENSITIVE):
result.append(self._script.formatting.getString(**args))
return result
def _generateRequired(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the required state of the object, but only if it is
required (i.e., it is in a dialog requesting input and the
user must give it a value). Otherwise, and empty array will
be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'required'
if obj.getState().contains(pyatspi.STATE_REQUIRED) \
or (obj.getRole() == pyatspi.ROLE_RADIO_BUTTON \
and obj.parent.getState().contains(pyatspi.STATE_REQUIRED)):
result.append(self._script.formatting.getString(**args))
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'readonly'
if self._script.utilities.isReadOnlyTextArea(obj):
result.append(self._script.formatting.getString(**args))
return result
def _generateCellCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes that are in a table. An empty array will be
returned if this is not a checkable cell.
"""
result = []
if self._script.utilities.hasMeaningfulToggleAction(obj):
oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX, args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
elif state.contains(pyatspi.STATE_INDETERMINATE):
result.append(indicators[2])
else:
result.append(indicators[0])
return result
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'radiobutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateChildWidget(self, obj, **args):
widgetRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_TOGGLE_BUTTON]
isWidget = lambda x: x and x.getRole() in widgetRoles
# For GtkListBox, such as those found in the control center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
widget = pyatspi.findDescendant(obj, isWidget)
if widget:
return self.generate(widget, includeContext=False)
return []
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'togglebutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED) \
or state.contains(pyatspi.STATE_PRESSED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateMenuItemCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the menu item, only if it is
checked. Otherwise, and empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
if obj.getState().contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
return result
def _generateExpandableState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the expanded/collapsed state of an object, such as a
tree node. If the object is not expandable, an empty array
will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'expansion'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
def _generateRowHeader(self, obj, **args):
"""Returns an array of strings to be used in speech and braille that
represent the row header for an object that is in a table, if
it exists. Otherwise, an empty array is returned.
"""
result = []
header = self._script.utilities.rowHeaderForCell(obj)
if not header:
return result
text = self._script.utilities.displayedText(header)
if not text:
return result
roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_ROW_HEADER)
if args.get('mode') == 'speech':
if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:
text = "%s %s" % (text, roleString)
elif args.get('mode') == 'braille':
text = "%s %s" % (text, roleString)
result.append(text)
return result
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists. Otherwise, an empty array
is returned.
"""
result = []
header = self._script.utilities.columnHeaderForCell(obj)
if not header:
return result
text = self._script.utilities.displayedText(header)
if not text:
return result
roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_COLUMN_HEADER)
if args.get('mode') == 'speech':
if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:
text = "%s %s" % (text, roleString)
elif args.get('mode') == 'braille':
text = "%s %s" % (text, roleString)
result.append(text)
return result
def _generateTableCell2ChildLabel(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
label of a toggle in a table cell that has a special 2 child
pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
if self._script.utilities.hasMeaningfulToggleAction(child):
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if not hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateTableCell2ChildToggle(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
toggle value of a toggle in a table cell that has a special 2
child pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
if self._script.utilities.hasMeaningfulToggleAction(child):
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):
"""If this table cell has a "toggle" action, and doesn't have any
label associated with it then also speak the table column
header. See Orca bug #455230 for more details.
"""
# If we're reading just a single cell in speech, the new
# header portion is going to give us this information.
#
if args['mode'] == 'speech' and not args.get('readingRow', False):
return []
result = []
descendant = self._script.utilities.realActiveDescendant(obj)
label = self._script.utilities.displayedText(descendant)
if not label and self._script.utilities.hasMeaningfulToggleAction(obj):
accHeader = self._script.utilities.columnHeaderForCell(obj)
result.append(accHeader.name)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
as the user arrows up/down the roles. This leads to
complexity in the code. This method is used to return an
array of strings for use by speech and braille for a single
table cell itself. The string, 'blank', is added for empty
cells.
"""
result = []
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateTable(self, obj, **args):
"""Returns an array of strings for use by speech and braille to present
the size of a table."""
if self._script.utilities.isLayoutOnly(obj):
return []
try:
table = obj.queryTable()
except:
return []
return [messages.tableSize(table.nRows, table.nColumns)]
    def _generateTableCellRow(self, obj, **args):
        """Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the roles.  This leads to complexity in
        the code.  This method is used to return an array of strings
        (and possibly voice and audio specifications) for an entire row
        in a table if that's what the user has requested and if the row
        has changed.  Otherwise, it will return an array for just the
        current cell.
        """
        result = []
        try:
            parentTable = obj.parent.queryTable()
        except:
            # Parent does not implement the table interface.
            parentTable = None
        isDetailedWhereAmI = args.get('formatType', None) == 'detailedWhereAmI'
        readFullRow = self._script.utilities.shouldReadFullRow(obj)
        if (readFullRow or isDetailedWhereAmI) and parentTable \
           and (not self._script.utilities.isLayoutOnly(obj.parent)):
            parent = obj.parent
            index = self._script.utilities.cellIndex(obj)
            row = parentTable.getRowAtIndex(index)
            column = parentTable.getColumnAtIndex(index)
            # This is an indication of whether we should speak all the
            # table cells (the user has moved focus up or down a row),
            # or just the current one (focus has moved left or right in
            # the same row).
            #
            presentAll = True
            if isDetailedWhereAmI:
                # Single-column tables have nothing extra to present.
                if parentTable.nColumns <= 1:
                    return result
            elif "lastRow" in self._script.pointOfReference \
                 and "lastColumn" in self._script.pointOfReference:
                pointOfReference = self._script.pointOfReference
                # Braille always shows the full row; speech does so only
                # when the row changed, or when bouncing off the first or
                # last row in the same column.
                presentAll = \
                    (self._mode == 'braille') \
                    or \
                    ((pointOfReference["lastRow"] != row) \
                     or ((row == 0 or row == parentTable.nRows-1) \
                         and pointOfReference["lastColumn"] == column))
            if presentAll:
                args['readingRow'] = True
                if self._script.utilities.isTableRow(obj):
                    cells = [x for x in obj]
                else:
                    cells = [parentTable.getAccessibleAt(row, i) \
                             for i in range(parentTable.nColumns)]
                for cell in cells:
                    if not cell:
                        continue
                    state = cell.getState()
                    showing = state.contains(pyatspi.STATE_SHOWING)
                    if showing:
                        cellResult = self._generateRealTableCell(cell, **args)
                        # Separate cells with a delimiter on the display.
                        if cellResult and result and self._mode == 'braille':
                            result.append(braille.Region(
                                object_properties.TABLE_CELL_DELIMITER_BRAILLE))
                        result.extend(cellResult)
            else:
                result.extend(self._generateRealTableCell(obj, **args))
        else:
            # Not reading the full row; present just this cell.
            result.extend(self._generateRealTableCell(obj, **args))
        return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateExpandedEOCs(self, obj, **args):
"""Returns the expanded embedded object characters for an object."""
return []
def _generateSubstring(self, obj, **args):
start = args.get('startOffset')
end = args.get('endOffset')
if start is None or end is None:
return []
substring = self._script.utilities.substring(obj, start, end)
if substring and not self._script.EMBEDDED_OBJECT_CHARACTER in substring:
return [substring]
return []
def _generateStartOffset(self, obj, **args):
return args.get('startOffset')
def _generateEndOffset(self, obj, **args):
return args.get('endOffset')
def _generateCurrentLineText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
result = self._generateSubstring(obj, **args)
if result:
return result
[text, caretOffset, startOffset] = self._script.getTextLineAtCaret(obj)
if text and not self._script.EMBEDDED_OBJECT_CHARACTER in text:
return [text]
return []
def _generateDisplayedText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represents all the text being displayed by the object.
"""
result = self._generateSubstring(obj, **args)
if result:
return result
displayedText = self._script.utilities.displayedText(obj)
if not displayedText:
return []
return [displayedText]
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNodeLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the tree node level of the object, or an empty
array if the object is not a tree node.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nodelevel'
level = self._script.utilities.nodeLevel(obj)
if level >= 0:
result.append(self._script.formatting.getString(**args)\
% (level + 1))
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
def _generateValue(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the value of the object. This is typically the
numerical value, but may also be the text of the 'value'
attribute if it exists on the object. [[[WDW - we should
consider returning an empty array if there is no value.
"""
return [self._script.utilities.textForValue(obj)]
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateApplicationName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the name of the applicaton for the object.
"""
result = []
try:
result.append(obj.getApplication().name)
except:
pass
return result
def _generateNestingLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the nesting level of an object in a list.
"""
start = args.get('startOffset')
end = args.get('endOffset')
if start is not None and end is not None:
return []
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nestinglevel'
nestingLevel = self._script.utilities.nestingLevel(obj)
if nestingLevel:
result.append(self._script.formatting.getString(**args)\
% nestingLevel)
return result
def _generateRadioButtonGroup(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the radio button group label for the object, or an
empty array if the object has no such label.
"""
result = []
try:
role = obj.getRole()
except:
role = None
if role == pyatspi.ROLE_RADIO_BUTTON:
radioGroupLabel = None
relations = obj.getRelationSet()
for relation in relations:
if (not radioGroupLabel) \
and (relation.getRelationType() \
== pyatspi.RELATION_LABELLED_BY):
radioGroupLabel = relation.getTarget(0)
break
if radioGroupLabel:
result.append(self._script.utilities.\
displayedText(radioGroupLabel))
else:
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() in [pyatspi.ROLE_PANEL,
pyatspi.ROLE_FILLER]:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
def _generateRealActiveDescendantDisplayedText(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the text actually being painted in the cell, if it can be
found. Otherwise, an empty array is returned.
"""
result = []
rad = self._script.utilities.realActiveDescendant(obj)
return self._generateDisplayedText(rad, **args)
def _generateRealActiveDescendantRoleName(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the role of the object actually being painted in the cell.
"""
rad = self._script.utilities.realActiveDescendant(obj)
args['role'] = rad.getRole()
return self._generateRoleName(rad, **args)
def _generateNamedContainingPanel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the nearest ancestor of an object which is a named panel.
"""
result = []
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_PANEL:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
def _generatePageSummary(self, obj, **args):
return []
def _generateProgressBarIndex(self, obj, **args):
return []
def _generateProgressBarValue(self, obj, **args):
return []
|
chrys87/orca-beep
|
src/orca/generator.py
|
Python
|
lgpl-2.1
| 43,334
|
[
"ORCA"
] |
848b82f34a014ffd31136f1416ef268fa81ac4dad8361debbb10bd88db8693db
|
#!/usr/bin/python
########################################################################
# 9 Jan 2015
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import argparse
import os, re, sys
import subprocess
import shutil
import math
def head_file(ifile, outfile, num):
	"""Write the first *num* lines of ``ifile`` to ``outfile``.

	Both files are opened with context managers so the handles are closed
	even on error (the original leaked the output handle, which could
	leave the file unflushed when the process exited abnormally).
	"""
	with open(ifile) as inp:
		with open(outfile, "w") as out:
			for count, line in enumerate(inp):
				if count >= num:
					break
				out.write(line)
def infer_experiment(fastq1, fastq2, bowtie_ref, refbed):
	"""Align a 1M-read sample of the paired FASTQs with bowtie2 and run
	RSeQC's infer_experiment.py on the result to detect strandedness;
	also estimate the insert-size distribution from the alignment.

	Returns (strandedness, insert) where strandedness is "yes", "reverse"
	or "no" (from read_infer) and insert is get_insert's [mean, sd].
	Temporary files live in ./tmp, which is removed before returning.
	"""
	if not os.path.isdir("tmp"):
		os.mkdir("tmp")
	head_file(fastq1, "tmp/infer_test_1.fq", 1000000)
	head_file(fastq2, "tmp/infer_test_2.fq", 1000000)
	# os.devnull in a with-block: the original opened /dev/null and never
	# closed it (and was not portable to non-POSIX systems).
	with open(os.devnull, 'w') as dev:
		align_command = "bowtie2 -x {} -1 tmp/infer_test_1.fq -2 tmp/infer_test_2.fq -S tmp/tmp.sam".format(bowtie_ref)
		subprocess.call(align_command.split(), stdout=dev)
		infercommand = "infer_experiment.py -i tmp/tmp.sam -r {} > tmp/infer_res.txt".format(refbed)
		insert = get_insert("tmp/tmp.sam")
		# shell=True is required here because of the output redirection.
		subprocess.call(infercommand, shell=True, stdout=dev)
	reverse = read_infer()
	shutil.rmtree("tmp")
	return reverse, insert
def read_infer():
	"""Parse tmp/infer_res.txt (output of RSeQC's infer_experiment.py)
	and classify library strandedness.

	Returns "yes" if more than 80% of reads follow the forward
	(1++,1--,2+-,2-+) pattern, "reverse" if more than 80% follow the
	reverse (1+-,1-+,2++,2--) pattern, and "no" otherwise.

	Bug fixed: the original used str.lstrip(prefix), which strips a
	character *set* rather than a prefix — the set includes '1' and '2',
	so a value like "1.0000" lost its leading digit.  Prefix slicing is
	used instead.  per1/per2 are also initialized so a malformed report
	no longer raises NameError.
	"""
	prefix1 = 'Fraction of reads explained by "1++,1--,2+-,2-+": '
	prefix2 = 'Fraction of reads explained by "1+-,1-+,2++,2--": '
	per1 = 0.0
	per2 = 0.0
	with open("tmp/infer_res.txt") as f:
		for line in f:
			line = line.rstrip()
			if line.startswith(prefix1):
				per1 = float(line[len(prefix1):])
			elif line.startswith(prefix2):
				per2 = float(line[len(prefix2):])
	if per1 > 0.8:
		return "yes"
	elif per2 > 0.8:
		return "reverse"
	return "no"
def paired_rnaseq_process(fastq1, fastq2, gse, gsm, bowtie_ref, gtf, anno_gtf, reverse, insert, threads):
	"""Align a paired-end RNA-seq sample with Tophat, then count reads per
	gene with HTSeq-count (both via wrapper scripts).

	reverse -- strandedness flag from infer_experiment ("yes", "reverse"
	           or "no"), forwarded to htseq-count's -s option.
	insert  -- [mean, sd] insert-size estimate; rounded to ints for the
	           aligner's -a/-b options.
	Output is written under <gse>/<gsm>/.  Returns the two command
	strings that were run.
	NOTE(review): any reverse value other than yes/reverse/no leaves
	htseq_count unset and raises NameError below — confirm callers.
	"""
	#Need to look at insert size as well! Add that to infer_experiment?
	print "==> Running Tophat...\n"
	dev = open('/dev/null', 'w')
	align_command = "pyrna_align.py tophat -p {0} {1} -i {2} -g {3} -t {4} -o {5}/{6} -a {7} -b {8}".format(fastq1, fastq2, bowtie_ref, gtf, threads, gse, gsm,
		int(round(insert[0])), int(round(insert[1])))
	subprocess.call(align_command.split(), stdout=dev)
	print "==> Running HTSeq-count...\n"
	if reverse == "reverse":
		htseq_count = "pyrna_count.py htseq -i {0}/{1}/accepted_hits.bam -g {2} -o {0}/{1}/{1}.count -s reverse".format(gse, gsm, anno_gtf)
	elif reverse == "yes":
		htseq_count = "pyrna_count.py htseq -i {0}/{1}/accepted_hits.bam -g {2} -o {0}/{1}/{1}.count -s yes".format(gse, gsm, anno_gtf)
	elif reverse == "no":
		htseq_count = "pyrna_count.py htseq -i {0}/{1}/accepted_hits.bam -g {2} -o {0}/{1}/{1}.count -s no".format(gse, gsm, anno_gtf)
	subprocess.call(htseq_count.split(), stdout=dev)
	return align_command, htseq_count
def paired_chipseq_process(fastq1, fastq2, gse, gsm, bowtie_ref, genome, threads):
	"""Align a paired-end ChIP-seq sample via pychip_align.py and convert
	the resulting SAM to a UCSC track via pychip_ucsc.py.

	Output is written under <gse>/<gsm>/.  Returns the two command
	strings that were run.
	NOTE(review): genomes other than mm10/hg19 leave ``v`` unset and
	raise NameError on the format() call below — confirm intended inputs.
	"""
	#For human chipseq, use v1, mouse use v2
	dev = open('/dev/null', 'w')
	if genome == "mm10":
		v = 2
	elif genome == "hg19":
		v = 1
	print "==> Running Bowtie...\n"
	align_command = "pychip_align.py -p {0} {1} -i {2} -v {3} -n {4} -o {5}/{4} -t {6}".format(fastq1, fastq2, bowtie_ref, v, gsm, gse, threads)
	subprocess.call(align_command.split(), stdout=dev)
	print "==> Converting to BigWig...\n"
	toucsc = "pychip_ucsc.py -i {0}/{1}/{1}.sam -g {2} -p".format(gse, gsm, genome)
	subprocess.call(toucsc.split(), stdout=dev)
	return align_command, toucsc
def single_rnaseq_process(fastq, gse, gsm, bowtie_ref, gtf, anno_gtf, threads):
	"""Align a single-end RNA-seq sample with Tophat and count reads per
	gene with HTSeq-count (strandedness fixed at "no" for single-end).

	Output is written under <gse>/<gsm>/.  Returns the two command
	strings that were run.
	"""
	print "==> Running Tophat...\n"
	dev = open('/dev/null', 'w')
	align_command = "pyrna_align.py tophat -f {0} -i {1} -g {2} -t {3} -o {4}/{5}".format(fastq, bowtie_ref, gtf, threads, gse, gsm)
	subprocess.call(align_command.split(), stdout=dev)
	print "==> Running HTSeq-count...\n"
	htseq_count = "pyrna_count.py htseq -s no -i {0}/{1}/accepted_hits.bam -g {2} -o {0}/{1}/{1}.count".format(gse, gsm, anno_gtf)
	subprocess.call(htseq_count.split(), stdout=dev)
	return align_command, htseq_count
def single_chipseq_process(fastq, gse, gsm, bowtie_ref, genome, threads):
	"""Align a single-end ChIP-seq sample via pychip_align.py and convert
	the resulting SAM to a UCSC track via pychip_ucsc.py.

	Output is written under <gse>/<gsm>/.  Returns the two command
	strings that were run.
	NOTE(review): genomes other than mm10/hg19 leave ``v`` unset and
	raise NameError on the format() call below — confirm intended inputs.
	"""
	#For human chipseq, use v1, mouse use v2
	dev = open('/dev/null', 'w')
	if genome == "mm10":
		v = 2
	elif genome == "hg19":
		v = 1
	print "==> Running Bowtie...\n"
	align_command = "pychip_align.py -f {0} -i {1} -v {2} -n {3} -o {4}/{3} -t {5}".format(fastq, bowtie_ref, v, gsm, gse, threads)
	subprocess.call(align_command.split(), stdout=dev)
	print "==> Converting to BigWig...\n"
	toucsc = "pychip_ucsc.py -i {0}/{1}/{1}.sam -g {2}".format(gse, gsm, genome)
	subprocess.call(toucsc.split(), stdout=dev)
	return align_command, toucsc
def getmeanval(dic, maxbound=-1):
	"""Return [mean, sample_sd] of a histogram given as {value: count}.

	Entries whose value exceeds maxbound are ignored when maxbound != -1.
	The standard deviation uses the sample (n-1) denominator, so at least
	two counted observations are required (fewer raise ZeroDivisionError,
	matching the original behaviour).
	"""
	total = 0
	count = 0
	for value, freq in dic.items():
		if maxbound != -1 and value > maxbound:
			continue
		total += value * freq
		count += freq
	mean = total * 1.0 / count
	# Second pass: weighted sum of squared deviations from the mean.
	sqsum = 0
	for value, freq in dic.items():
		if maxbound != -1 and value > maxbound:
			continue
		sqsum += (value - mean) * (value - mean) * freq
	sd = math.sqrt(sqsum * 1.0 / (count - 1))
	return [mean, sd]
def get_insert(samfile):
	"""Scan a SAM file and estimate the paired-end insert-size distribution.

	Returns getmeanval's [mean, sd] of the TLEN histogram, with the upper
	bound capped at 3x the modal span, or None — after printing a warning
	— when no qualifying paired reads are found.

	A read qualifies when it has at least the 11 mandatory SAM fields plus
	one optional tag, a single-block perfect CIGAR (e.g. "50M"), a mate on
	the same reference (RNEXT == "="), and a positive TLEN.

	Fixes relative to the original: the file handle is closed via a
	with-block, and the unused plrdlen/readlen/nline/mtj locals are gone.
	"""
	plrdspan = {}
	perfect_cigar = re.compile('([0-9]+)M$')
	with open(samfile, "r") as sam:
		for line in sam:
			field = line.strip().split()
			if len(field) < 12:
				continue
			try:
				if perfect_cigar.match(field[5]) is None:
					# Ignore reads with indels/clipping.
					continue
				if field[6] != '=':
					# Mate mapped to a different reference.
					continue
				dist = int(field[8])
				if dist <= 0:
					# Ignore negative/zero template lengths.
					continue
				plrdspan[dist] = plrdspan.get(dist, 0) + 1
			except ValueError:
				continue
	if len(plrdspan) == 0:
		print('No qualified paired-end reads found. Are they single-end reads?')
	else:
		maxv = max(plrdspan, key=plrdspan.get)
		return getmeanval(plrdspan, maxbound=maxv * 3)
|
pdl30/pyngspipe
|
pyngspipe/tools.py
|
Python
|
gpl-2.0
| 6,001
|
[
"Bowtie",
"HTSeq"
] |
1c7ad3c0569213482a4ca4107b51eb9bbd33866cf687e42f7b24423a812fbc3d
|
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
class SegmentEditorHollow(ScriptedLoadableModule):
  """Hidden scripted module that registers the 'Hollow' segment editor
  effect with Slicer's segment editor at application startup.

  Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self, parent):
    # (Removed an unused 'import string' from the original.)
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "SegmentEditorHollow"
    self.parent.categories = ["Segmentation"]
    self.parent.dependencies = ["Segmentations"]
    self.parent.contributors = ["Andras Lasso (PerkLab)"]
    # Hidden: this module exists only to register the editor effect.
    self.parent.hidden = True
    self.parent.helpText = "This hidden module registers the segment editor effect"
    self.parent.helpText += self.getDefaultModuleDocumentationLink()
    self.parent.acknowledgementText = "Supported by NA-MIC, NAC, BIRN, NCIGT, and the Slicer Community. See http://www.slicer.org for details."
    # Defer registration until the application has finished starting up.
    slicer.app.connect("startupCompleted()", self.registerEditorEffect)

  def registerEditorEffect(self):
    """Load <ClassName>Lib/SegmentEditorEffect.py as a scripted effect and
    register it with the segment editor."""
    import qSlicerSegmentationsEditorEffectsPythonQt as qSlicerSegmentationsEditorEffects
    instance = qSlicerSegmentationsEditorEffects.qSlicerSegmentEditorScriptedEffect(None)
    effectFilename = os.path.join(os.path.dirname(__file__), self.__class__.__name__+'Lib/SegmentEditorEffect.py')
    # Qt expects forward slashes even on Windows.
    instance.setPythonSource(effectFilename.replace('\\','/'))
    instance.self().register()
class SegmentEditorHollowTest(ScriptedLoadableModuleTest):
  """
  This is the test case for your scripted module.
  Uses ScriptedLoadableModuleTest base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def setUp(self):
    """ Do whatever is needed to reset the state - typically a scene clear will be enough.
    """
    slicer.mrmlScene.Clear(0)
  def runTest(self):
    """Run as few or as many tests as needed here.
    """
    self.setUp()
    self.test_SegmentEditorHollow1()
  def test_SegmentEditorHollow1(self):
    """
    Basic automated test of the segmentation method:
    - Create segmentation by placing sphere-shaped seeds
    - Run segmentation
    - Verify results using segment statistics
    The test can be executed from SelfTests module (test name: SegmentEditorHollow)
    """
    self.delayDisplay("Starting test_SegmentEditorHollow1")
    import vtkSegmentationCorePython as vtkSegmentationCore
    import vtkSlicerSegmentationsModuleLogicPython as vtkSlicerSegmentationsModuleLogic
    import SampleData
    from SegmentStatistics import SegmentStatisticsLogic
    ##################################
    self.delayDisplay("Load master volume")
    import SampleData
    sampleDataLogic = SampleData.SampleDataLogic()
    masterVolumeNode = sampleDataLogic.downloadMRBrainTumor1()
    ##################################
    self.delayDisplay("Create segmentation containing a few spheres")
    segmentationNode = slicer.vtkMRMLSegmentationNode()
    slicer.mrmlScene.AddNode(segmentationNode)
    segmentationNode.CreateDefaultDisplayNodes()
    segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)
    # Segments are defined by a list of: name and a list of sphere [radius, posX, posY, posZ]
    segmentGeometries = [
      ['Tumor', [[10, -6,30,28]]],
      ['Background', [[10, 0,65,22], [15, 1, -14, 30], [12, 0, 28, -7], [5, 0,30,54], [12, 31, 33, 27], [17, -42, 30, 27], [6, -2,-17,71]]],
      ['Air', [[10, 76,73,0], [15, -70,74,0]]] ]
    for segmentGeometry in segmentGeometries:
      segmentName = segmentGeometry[0]
      # Union all spheres of the segment into a single closed surface.
      appender = vtk.vtkAppendPolyData()
      for sphere in segmentGeometry[1]:
        sphereSource = vtk.vtkSphereSource()
        sphereSource.SetRadius(sphere[0])
        sphereSource.SetCenter(sphere[1], sphere[2], sphere[3])
        appender.AddInputConnection(sphereSource.GetOutputPort())
      segment = vtkSegmentationCore.vtkSegment()
      segment.SetName(segmentationNode.GetSegmentation().GenerateUniqueSegmentID(segmentName))
      appender.Update()
      segment.AddRepresentation(vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationClosedSurfaceRepresentationName(), appender.GetOutput())
      segmentationNode.GetSegmentation().AddSegment(segment)
    ##################################
    self.delayDisplay("Create segment editor")
    segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()
    segmentEditorWidget.show()
    segmentEditorWidget.setMRMLScene(slicer.mrmlScene)
    segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()
    slicer.mrmlScene.AddNode(segmentEditorNode)
    segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)
    segmentEditorWidget.setSegmentationNode(segmentationNode)
    segmentEditorWidget.setMasterVolumeNode(masterVolumeNode)
    ##################################
    self.delayDisplay("Run segmentation")
    # Exercise the Hollow effect registered by this module.
    segmentEditorWidget.setActiveEffectByName("Hollow")
    effect = segmentEditorWidget.activeEffect()
    effect.setParameter("ShellThicknessMm", 3.0)
    effect.setParameter("ShellMode", "MEDIAL_SURFACE")
    effect.self().onApply()
    ##################################
    self.delayDisplay("Make segmentation results nicely visible in 3D")
    segmentationDisplayNode = segmentationNode.GetDisplayNode()
    segmentationDisplayNode.SetSegmentVisibility("Air", False)
    segmentationDisplayNode.SetSegmentOpacity3D("Background",0.5)
    self.delayDisplay('test_SegmentEditorHollow1 passed')
|
lassoan/SlicerSegmentEditorExtraEffects
|
SegmentEditorHollow/SegmentEditorHollow.py
|
Python
|
bsd-3-clause
| 5,507
|
[
"VTK"
] |
740d5397a8c63cc4997533e05a09b76691cae84f20120eb97efec8de4daa257a
|
"""
Experiments on various real datasets. Vary the sample sizes.
"""
__author__ = 'wittawat'
import fsic.data as data
import fsic.feature as fea
import fsic.indtest as it
import fsic.glo as glo
import fsic.util as util
import fsic.kernel as kernel
import exglobal as exglo
# need independent_jobs package
# https://github.com/karlnapf/independent-jobs
# The independent_jobs and fsic have to be in the global search path (.bashrc)
import independent_jobs as inj
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.SingleResult import SingleResult
from independent_jobs.aggregators.SingleResultAggregator import SingleResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.Log import logger
import logging
import numpy as np
import os
import re
import sys
#import tempfile
import time
def job_nfsicJ10_stoopt(paired_source, tr, te, r, n_permute=None, J=10):
    """NFSIC with J Gaussian test locations optimized on the training split,
    then tested on the test split.

    Args:
        paired_source: data source (unused here; kept for the common job
            signature used by Ex5Job.compute).
        tr, te: training / test data splits.
        r: repetition (trial) index; used to derive random seeds.
        n_permute: if not None, use a permutation null with this many
            permutations instead of the asymptotic null.
        J: number of test locations.

    Returns:
        dict with 'test_result' and 'time_secs'.
    """
    k, l = kl_kgauss_median_bounds(tr)
    medx2 = k.sigma2
    medy2 = l.sigma2
    # Allowed multiplicative range around the median-heuristic widths when
    # clamping the optimized widths below.
    fac_min = 1e-1
    fac_max = 5e3
    with util.ContextTimer() as t:
        nfsic_opt_options = {'n_test_locs':J, 'max_iter':100,
            'V_step':1, 'W_step':1, 'gwidthx_step':1, 'gwidthy_step':1,
            'batch_proportion':1, 'tol_fun':1e-4, 'step_pow':0.5, 'seed':r+2,
            'reg': 1e-6,
            # Bound the width search to a few orders of magnitude around the
            # median heuristic to avoid degenerate optima.
            'gwidthx_lb': max(1e-2, medx2*1e-3),
            'gwidthx_ub': min(1e6, medx2*1e3),
            'gwidthy_lb': max(1e-2, medy2*1e-3),
            'gwidthy_ub': min(1e6, medy2*1e3),
            }
        # alpha is the module-level significance level.
        op_V, op_W, op_gwx, op_gwy, info = it.GaussNFSIC.optimize_locs_widths(tr,
            alpha, **nfsic_opt_options )
        # make sure the optimized widths are not too extreme
        #last_gwx = info['gwidthxs'][-1]
        #last_gwy = info['gwidthys'][-1]
        #op_gwx = last_gwx
        #op_gwy = last_gwy
        # Clamp into [fac_min*med2, fac_max*med2] (never below 1e-5).
        op_gwx = max(fac_min*medx2, 1e-5, min(fac_max*medx2, op_gwx))
        op_gwy = max(fac_min*medy2, 1e-5, min(fac_max*medy2, op_gwy))
        nfsic_opt = it.GaussNFSIC(op_gwx, op_gwy, op_V, op_W, alpha=alpha,
            reg='auto', n_permute=n_permute, seed=r+3)
        nfsic_opt_result = nfsic_opt.perform_test(te)
    return {
        # nfsic_opt's V, W can take up a lot of memory when d is huge.
        #'indtest': nfsic_opt,
        'test_result': nfsic_opt_result, 'time_secs': t.secs}
def job_nfsicJ10_perm_stoopt(paired_source, tr, te, r):
    """Optimized NFSIC (J=10) whose null distribution is simulated with
    500 permutations instead of the asymptotic approximation."""
    return job_nfsicJ10_stoopt(paired_source, tr, te, r, 500)
def job_nfsicJ1_perm_stoopt(paired_source, tr, te, r):
    """Optimized NFSIC with a single test location (J=1) and a
    500-permutation null distribution."""
    return job_nfsicJ10_stoopt(paired_source, tr, te, r, n_permute=500, J=1)
def job_nfsicJ10_med(paired_source, tr, te, r, n_permute=None):
    """
    NFSIC in which the test locations are randomized, and the Gaussian width
    is set with the median heuristic. Use full sample. No training/testing splits.
    J=10

    Args:
        paired_source: data source (unused; kept for the common job signature).
        tr, te: splits; merged back into the full sample here.
        r: trial index used to derive random seeds.
        n_permute: if not None, use a permutation null with this many permutations.

    Returns:
        dict with 'test_result' and 'time_secs'.
    """
    J = 10
    # This test uses the full sample; merge the splits.
    pdata = tr + te
    with util.ContextTimer() as t:
        #V, W = it.GaussNFSIC.init_locs_2randn(pdata, J, seed=r+2)
        V, W = it.GaussNFSIC.init_locs_marginals_subset(pdata, J, seed=r+2)
        # May overfit and increase type-I errors?
        #V, W = it.GaussNFSIC.init_locs_joint_randn(pdata, J, seed=r+2)
        #with util.NumpySeedContext(seed=r+92):
        #    dx = pdata.dx()
        #    dy = pdata.dy()
        #    V = np.random.randn(J, dx)
        #    W = np.random.randn(J, dy)
        k, l = kl_kgauss_median_bounds(pdata)
        # alpha is the module-level significance level.
        nfsic_med = it.NFSIC(k, l, V, W, alpha=alpha, reg='auto',
            n_permute=n_permute, seed=r+3)
        nfsic_med_result = nfsic_med.perform_test(pdata)
    return {
        #'indtest': nfsic_med,
        'test_result': nfsic_med_result, 'time_secs': t.secs}
def job_nfsicJ10_perm_med(paired_source, tr, te, r):
    """Median-heuristic NFSIC (J=10) using a 500-permutation null."""
    return job_nfsicJ10_med(paired_source, tr, te, r, n_permute=500)
def job_qhsic_med(paired_source, tr, te, r):
    """
    Quadratic-time HSIC using the permutation test.
    - Gaussian kernels.
    - No parameter selection procedure. Use the median heuristic for both
      X and Y.
    - Use full sample for testing.

    Returns:
        dict with 'indtest', 'test_result' and 'time_secs'. For samples
        larger than 7000 the result fields are NaN placeholders.
    """
    # use full sample for testing. Merge training and test sets
    pdata = tr + te
    n_permute = 300
    if pdata.sample_size() > 7000:
        # give up. Too big for the quadratic-time permutation test; return
        # a NaN-filled placeholder result instead of running it.
        k, l = kl_kgauss_median_bounds(pdata)
        qhsic = it.QuadHSIC(k, l, n_permute, alpha=alpha, seed=r+1)
        fake_result = {'alpha': alpha, 'pvalue': np.nan, 'test_stat': np.nan
            , 'h0_rejected': np.nan, 'time_secs': np.nan, 'n_permute': n_permute}
        return {'indtest': qhsic, 'test_result': fake_result, 'time_secs': np.nan}
    # Actually do the test
    with util.ContextTimer() as t:
        k, l = kl_kgauss_median_bounds(pdata)
        qhsic = it.QuadHSIC(k, l, n_permute, alpha=alpha, seed=r+1)
        qhsic_result = qhsic.perform_test(pdata)
    return {'indtest': qhsic, 'test_result': qhsic_result, 'time_secs': t.secs}
def job_fhsic_med(paired_source, tr, te, r):
    """
    HSIC with random Fourier features. Simulate the null distribution
    with the spectrums of the empirical cross covariance operators.
    - Gaussian kernels.
    - No parameter selection procedure. Use the median heuristic for both
      X and Y.
    - Use full sample for testing.

    Returns:
        dict with 'indtest', 'test_result' and 'time_secs'.
    """
    n_simulate = 2000
    # random features
    n_features = 10
    # use full sample for testing. Merge training and test sets
    pdata = tr + te
    with util.ContextTimer() as t:
        X, Y = pdata.xy()
        # Median pairwise distances (subsampled for speed) set the kernel widths.
        medx = util.meddistance(X, subsample=1000)
        medy = util.meddistance(Y, subsample=1000)
        sigmax2 = medx**2
        sigmay2 = medy**2
        fmx = fea.RFFKGauss(sigmax2, n_features=n_features, seed=r+1)
        fmy = fea.RFFKGauss(sigmay2, n_features=n_features, seed=r+2)
        ffhsic = it.FiniteFeatureHSIC(fmx, fmy, n_simulate=n_simulate, alpha=alpha, seed=r+89)
        ffhsic_result = ffhsic.perform_test(pdata)
    return {'indtest': ffhsic, 'test_result': ffhsic_result, 'time_secs': t.secs}
def job_nyhsic_med(paired_source, tr, te, r):
    """
    HSIC with Nystrom approximation. Simulate the null distribution
    with the spectrums of the empirical cross covariance operators.
    - Gaussian kernels.
    - No parameter selection procedure. Use the median heuristic for both
      X and Y.
    - Use full sample for testing.

    Returns:
        dict with 'test_result' and 'time_secs'.
    """
    n_simulate = 2000
    # random features
    n_features = 10
    # use full sample for testing. Merge training and test sets
    pdata = tr + te
    with util.ContextTimer() as t:
        X, Y = pdata.xy()
        k, l = kl_kgauss_median_bounds(pdata)
        # randomly choose the inducing points from X, Y
        induce_x = util.subsample_rows(X, n_features, seed=r+2)
        induce_y = util.subsample_rows(Y, n_features, seed=r+3)
        nyhsic = it.NystromHSIC(k, l, induce_x, induce_y, n_simulate=n_simulate, alpha=alpha, seed=r+89)
        nyhsic_result = nyhsic.perform_test(pdata)
    return {
        #'indtest': nyhsic,
        'test_result': nyhsic_result, 'time_secs': t.secs}
def job_rdcperm_med(paired_source, tr, te, r, n_features=10):
    """
    The Randomized Dependence Coefficient test with permutations.

    Returns:
        dict with 'indtest', 'test_result' and 'time_secs'.
    """
    pdata = tr + te
    n_permute = 500
    # n_features=10 from Lopez-Paz et al., 2013 paper.
    with util.ContextTimer() as t:
        # get the median distances
        X, Y = pdata.xy()
        # copula transform to both X and Y
        cop_map = fea.MarginalCDFMap()
        Xcdf = cop_map.gen_features(X)
        Ycdf = cop_map.gen_features(Y)
        # Median heuristic applied on the copula-transformed data.
        medx = util.meddistance(Xcdf, subsample=1000)
        medy = util.meddistance(Ycdf, subsample=1000)
        sigmax2 = medx**2
        sigmay2 = medy**2
        fmx = fea.RFFKGauss(sigmax2, n_features=n_features, seed=r+19)
        fmy = fea.RFFKGauss(sigmay2, n_features=n_features, seed=r+220)
        rdcperm = it.RDCPerm(fmx, fmy, n_permute=n_permute, alpha=alpha, seed=r+100)
        rdcperm_result = rdcperm.perform_test(pdata)
    return {'indtest': rdcperm, 'test_result': rdcperm_result, 'time_secs': t.secs}
##-----------------------------------------------------------
def kl_kgauss_median_bounds(pdata):
    """Gaussian kernels for X and Y with median-heuristic widths, where each
    squared width is lower-bounded at 0.1 so it never gets degenerately small."""
    kx, ky = it.kl_kgauss_median(pdata)
    logging.info('medx2: %g', kx.sigma2)
    logging.info('medy2: %g', ky.sigma2)
    # Guard against near-zero median-heuristic widths.
    for ker in (kx, ky):
        ker.sigma2 = max(ker.sigma2, 1e-1)
    return kx, ky
# Define our custom Job, which inherits from base class IndependentJob
class Ex5Job(IndependentJob):
    """One (problem, repetition, method, sample size) unit of work, runnable
    locally or on a batch cluster via the independent_jobs framework."""

    def __init__(self, aggregator, folder_path, prob_label, rep, job_func, n ):
        #walltime = 60*59*24
        walltime = 60*59
        # Rough memory request scaled with the training-set size.
        memory = int(tr_proportion*n*1e-2) + 100
        IndependentJob.__init__(self, aggregator, walltime=walltime,
            memory=memory)
        self.folder_path = folder_path  # folder containing the pickled problem
        self.prob_label = prob_label    # problem identifier string
        self.rep = rep                  # repetition (trial) index
        self.job_func = job_func        # one of the job_* functions above
        self.n = n                      # sample size to draw

    # we need to define the abstract compute method. It has to return an instance
    # of JobResult base class
    def compute(self):
        """Load the problem, draw n samples, run the test, submit and save."""
        # randomly wait a few seconds so that multiple processes accessing the same
        # Theano function do not cause a lock problem. I do not know why.
        # I do not know if this does anything useful.
        # Sleep in seconds.
        time.sleep(np.random.rand(1)*2)

        # load the data and construct a PairedSource here
        # The data can be big. We have to load it in this job function i.e.,
        # each computing node loads by itself (no data passing).
        folder_path = self.folder_path
        prob_label = self.prob_label
        paired_source, _, is_h0 = exglo.get_problem_pickle(folder_path, prob_label + '.n0')
        n = self.n
        r = self.rep
        job_func = self.job_func
        pdata = paired_source.sample(n, seed=r)
        with util.ContextTimer() as t:
            logger.info("computing. %s. prob=%s, r=%d, n=%d"%(job_func.__name__,
                pdata.label, r, n))
            tr, te = pdata.split_tr_te(tr_proportion=tr_proportion, seed=r+21 )
            prob_label = self.prob_label
            job_result = job_func(paired_source, tr, te, r)

            # create ScalarResult instance
            result = SingleResult(job_result)
            # submit the result to my own aggregator
            self.aggregator.submit_result(result)
        func_name = job_func.__name__
        logger.info("done. ex1: %s, prob=%s, r=%d, n=%d. Took: %.3g s "%(func_name,
            pdata.label, r, n, t.secs))
        # save result
        fname = '%s-%s-r%d_n%d_a%.3f_trp%.2f.p' \
            %(prob_label, func_name, r, n, alpha, tr_proportion)
        glo.ex_save_result(ex, job_result, prob_label, fname)
# This import is needed so that pickle knows about the class Ex5Job.
# pickle is used when collecting the results from the submitted jobs.
from fsic.ex.ex5_real_vary_n import job_nfsicJ10_stoopt
from fsic.ex.ex5_real_vary_n import job_nfsicJ10_perm_stoopt
from fsic.ex.ex5_real_vary_n import job_nfsicJ1_perm_stoopt
from fsic.ex.ex5_real_vary_n import job_nfsicJ10_perm_med
from fsic.ex.ex5_real_vary_n import job_nfsicJ10_med
from fsic.ex.ex5_real_vary_n import job_qhsic_med
from fsic.ex.ex5_real_vary_n import job_nyhsic_med
from fsic.ex.ex5_real_vary_n import job_fhsic_med
from fsic.ex.ex5_real_vary_n import job_rdcperm_med
from fsic.ex.ex5_real_vary_n import Ex5Job
#--- experimental setting -----
# Experiment identifier; used in result folder/file names.
ex = 5

# Significance level of all tests.
alpha = 0.01
# Fraction of each drawn sample used for training (parameter selection).
tr_proportion = 0.5
# number of trials
reps = 200

# Job functions (methods) compared in this experiment.
method_job_funcs = [
    job_nfsicJ10_stoopt,
    job_nfsicJ10_med,
    #job_nfsicJ10_perm_stoopt,
    #job_nfsicJ10_perm_med,
    job_qhsic_med,
    job_nyhsic_med,
    job_fhsic_med,
    job_rdcperm_med,
]

# If is_rerun==False, do not rerun the experiment if a result file for the current
# setting already exists.
is_rerun = False
#---------------------------
def dataname2ns(data_name):
    """Map a real-dataset name to the list of sample sizes to try.

    Args:
        data_name: dataset identifier string.

    Returns:
        A list of increasing sample sizes (ints).

    Raises:
        ValueError: if data_name is not a known dataset.
    """
    very_low_ns = [200, 300, 400, 500]
    low_ns = [500, 1000, 1500, 2000, 5000]
    data2ns = {
        'msd50000': [500, 1000, 1500, 2000],
        'data_n10000_td1878_vd2000': [2000, 4000, 6000, 8000],
        #
        'white_wine': very_low_ns,
        'news_popularity': low_ns,
        'skillcraft1': very_low_ns,
        'chromatic_music': very_low_ns,
        'music68': very_low_ns,
        'movie_rating': very_low_ns,
        'latlong_temp_y2013': low_ns,
        'voice_gender': very_low_ns,
        # Arizona feature selection data
        # http://featureselection.asu.edu/datasets.php
        'lung': [50, 100, 150],  # lung max: 203
        'carcinom': [50, 100, 150],  # carcinom max: 174
        'CLL_SUB_111': [50, 100 ],  # max: 111
        'SMK_CAN_187': [50, 100, 150],  # max: 187
        'TOX_171': [50, 100, 150],  # max: 171
        #-------
        'higgs_no_deriv': very_low_ns,
    }
    if data_name not in data2ns:
        # Fix: previously formatted str(dict.keys()), which under Python 3
        # renders an unreadable dict_keys(...) wrapper in arbitrary order.
        raise ValueError('Unknown data name: %s. Need to be one of %s'
                         % (data_name, ', '.join(sorted(data2ns))))
    return data2ns[data_name]
def run_problem(folder_path, prob_label):
    """Run the experiment: submit one job per (repetition, sample size,
    method), wait for all of them, collect the results, and save a single
    aggregated result file."""
    pl = exglo.parse_prob_label(prob_label + '.n0')
    is_h0 = pl['is_h0']
    is_classification = pl['is_classification']
    data_name = pl['name']
    sample_sizes = dataname2ns(data_name)

    # /////// submit jobs //////////
    # create folder name string
    #result_folder = glo.result_folder()
    #tmp_dir = tempfile.gettempdir()
    from fsic.config import expr_configs
    tmp_dir = expr_configs['scratch_dir']
    foldername = os.path.join(tmp_dir, 'wj_slurm', 'e%d'%ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(
        foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")

    # Use the following line if Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters)
    n_methods = len(method_job_funcs)
    # repetitions x sample_sizes x #methods
    aggregators = np.empty((reps, len(sample_sizes), n_methods ), dtype=object)
    for r in range(reps):
        for ni, n in enumerate(sample_sizes):
            for mi, f in enumerate(method_job_funcs):
                # name used to save the result
                func_name = f.__name__
                fname = '%s-%s-r%d_n%d_a%.3f_trp%.2f.p' \
                    %(prob_label, func_name, r, n, alpha, tr_proportion)
                if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                    logger.info('%s exists. Load and return.'%fname)
                    job_result = glo.ex_load_result(ex, prob_label, fname)
                    # Wrap the cached result in an aggregator so the
                    # collection loop below treats it uniformly.
                    sra = SingleResultAggregator()
                    sra.submit_result(SingleResult(job_result))
                    aggregators[r, ni, mi] = sra
                else:
                    # result not exists or rerun
                    job = Ex5Job(SingleResultAggregator(), folder_path, prob_label, r,
                        f, n)
                    agg = engine.submit_job(job)
                    aggregators[r, ni, mi] = agg

    # let the engine finish its business
    logger.info("Wait for all call in engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    job_results = np.empty((reps, len(sample_sizes), n_methods), dtype=object)
    for r in range(reps):
        for ni, n in enumerate(sample_sizes):
            for mi, f in enumerate(method_job_funcs):
                logger.info("Collecting result (%s, r=%d, n=%d)" %
                    (f.__name__, r, n))
                # let the aggregator finalize things
                aggregators[r, ni, mi].finalize()
                # aggregators[i].get_final_result() returns a SingleResult instance,
                # which we need to extract the actual result
                job_result = aggregators[r, ni, mi].get_final_result().result
                job_results[r, ni, mi] = job_result

    #func_names = [f.__name__ for f in method_job_funcs]
    #func2labels = exglobal.get_func2label_map()
    #method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    # - Do not store PairedSource because it can be very big.
    results = {'job_results': job_results, 'sample_sizes': sample_sizes,
        'is_h0': is_h0, 'is_classification': is_classification, 'alpha':
        alpha, 'repeats': reps, 'tr_proportion': tr_proportion,
        'method_job_funcs': method_job_funcs, 'prob_label': prob_label, }

    # class name
    fname = 'ex%d-%s-me%d_rs%d_nmi%d_nma%d_a%.3f_trp%.2f.p' \
        %(ex, prob_label, n_methods, reps, min(sample_sizes),
        max(sample_sizes), alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s'%fname)
def main():
    """Entry point: read (folder_path, problem_label) from argv and run."""
    logging.getLogger().setLevel(logging.DEBUG)
    if len(sys.argv) != 3:
        print('Usage: %s folder_path problem_label'%sys.argv[0])
        print(' - folder_path: full path of the folder relative to fsic/data/ folder.')
        sys.exit(1)
    folder_path, prob_label = sys.argv[1], sys.argv[2]
    run_problem(folder_path, prob_label)


if __name__ == '__main__':
    main()
|
wittawatj/fsic-test
|
fsic/ex/ex5_real_vary_n.py
|
Python
|
mit
| 18,035
|
[
"Gaussian"
] |
a72a63e2452dd6d2ba8987937b43afb93ca330aeb705b7530b68a85ecf33a4b2
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and therefor,
the corresponding Mahalanobis distances are. One would better have to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P.J.Rousseuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# 125 samples with 2 features; the last 25 rows are replaced by outliers.
n_samples = 125
n_outliers = 25
n_features = 2

# generate data
# NOTE(review): no RNG seed is set, so the figure differs between runs.
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers: larger variance on the second feature
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)

# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)

# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")

# Show contours of the distance functions over a grid covering the plot.
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]

# Dashed contours: distance under the non-robust MLE covariance.
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
# Dotted contours: distance under the robust MCD covariance.
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')

subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())

# Plot the scores for each point
# Cube root (** 0.33) of the distances, following Wilson & Hilferty [2].
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())

robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())

plt.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/covariance/plot_mahalanobis_distances.py
|
Python
|
bsd-3-clause
| 6,232
|
[
"Gaussian"
] |
08086619f4b3563b173d1381a12aca73363fd55935d500a20d9440205dc2f8b7
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Swarming bot code. Includes bootstrap and swarming_bot.zip.
It includes everything that is AppEngine specific. The non-GAE code is in
bot_archive.py.
"""
import ast
import collections
import hashlib
import logging
import os.path
from six.moves import urllib
from google.appengine.api import memcache
from google.appengine.ext import ndb
from components import auth
from components import config
from components import utils
from server import bot_archive
from server import config as local_config
# Repository root of the swarming server (two levels above this file).
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# In theory, a memcache entry can be 1MB in size, and this sometimes works, but
# in practice we found that it's flaky at 500kb or above. 250kb seems to be safe
# though and doesn't appear to have any runtime impact.
# - aludwin@, June 2017
MAX_MEMCACHED_SIZE_BYTES = 250000


### Models.


# A served file: raw content, who provided it, when, and at which
# luci-config revision. who/when/version are None for tree fallbacks.
File = collections.namedtuple('File', ('content', 'who', 'when', 'version'))
### Public APIs.
def get_bootstrap(host_url, bootstrap_token=None):
  """Returns the mangled version of the utility script bootstrap.py.

  Try to find the content in the following order:
  - get the file from luci-config
  - return the default version

  Args:
    host_url: swarming server URL embedded into the generated script header.
    bootstrap_token: optional token embedded into the header so the bot can
        authenticate follow-up bootstrap calls.

  Returns:
    File instance.
  """
  # Calculate the header to inject at the top of the file.
  if bootstrap_token:
    quoted = urllib.parse.quote_plus(bootstrap_token)
    # The token must already be URL-safe since it is embedded verbatim.
    assert bootstrap_token == quoted, bootstrap_token
  header = (
      '#!/usr/bin/env python\n'
      '# coding: utf-8\n'
      'host_url = %r\n'
      'bootstrap_token = %r\n') % (host_url or '', bootstrap_token or '')
  # Check in luci-config imported file if present.
  rev, cfg = config.get_self_config(
      'scripts/bootstrap.py', store_last_good=True)
  if cfg:
    return File(header + cfg, config.config_service_hostname(), None, rev)
  # Fallback to the one embedded in the tree.
  path = os.path.join(ROOT_DIR, 'swarming_bot', 'config', 'bootstrap.py')
  with open(path, 'rb') as f:
    return File(header + f.read(), None, None, None)
def get_bot_config():
  """Returns the current version of bot_config.py.

  Try to find the content in the following order:
  - get the file from luci-config
  - return the default version

  Returns:
    (File instance, luci-config revision or None if not found there).
  """
  # Check in luci-config imported file if present.
  rev, cfg = config.get_self_config(
      'scripts/bot_config.py', store_last_good=True)
  if cfg:
    return File(cfg, config.config_service_hostname(), None, rev), rev
  # Fallback to the one embedded in the tree.
  path = os.path.join(ROOT_DIR, 'swarming_bot', 'config', 'bot_config.py')
  with open(path, 'rb') as f:
    return File(f.read(), None, None, None), rev
def get_bot_version(host):
  """Retrieves the current bot version (SHA256) loaded on this server.

  The memcache is first checked for the version, otherwise the value
  is generated and then stored in the memcache.

  Returns:
    version: hash of the current bot version.
    additionals: dict of additional files, or None on a memcache hit.
    bot_config_rev: revision of the bot_config.py.
  """
  signature = _get_signature(host)
  version = memcache.get('version-' + signature, namespace='bot_code')
  bot_config_rev = memcache.get(
      'bot_config_rev-' + signature, namespace='bot_code')
  if version and bot_config_rev:
    # Cache hit: 'additionals' is not cached, so return None for it.
    return version, None, bot_config_rev

  # Need to calculate it.
  bot_config, bot_config_rev = get_bot_config()
  additionals = {'config/bot_config.py': bot_config.content}
  bot_dir = os.path.join(ROOT_DIR, 'swarming_bot')
  version = bot_archive.get_swarming_bot_version(
      bot_dir, host, utils.get_app_version(), additionals,
      local_config.settings())
  # Cache for only 60s so config updates propagate quickly.
  memcache.set('version-' + signature, version, namespace='bot_code', time=60)
  memcache.set(
      'bot_config_rev-' + signature,
      bot_config_rev,
      namespace='bot_code',
      time=60)
  return version, additionals, bot_config_rev
def get_swarming_bot_zip(host):
  """Returns a zipped file of all the files a bot needs to run.

  Returns:
    A string representing the zipped file's contents.
  """
  version, additionals, bot_config_rev = get_bot_version(host)
  cached_content, cached_bot_config_rev = get_cached_swarming_bot_zip(version)
  # TODO(crbug.com/1087981): Compare the bot config revisions.
  # Separate deployment to be safe.
  if cached_content and cached_bot_config_rev:
    logging.debug('memcached bot code %s; %d bytes with bot_config.py rev: %s',
                  version, len(cached_content), cached_bot_config_rev)
    return cached_content

  # Get the start bot script from the database, if present. Pass an empty
  # file if the files isn't present.
  # additionals is None when get_bot_version() hit its memcache; rebuild it.
  bot_config, bot_config_rev = get_bot_config()
  additionals = additionals or {
      'config/bot_config.py': bot_config.content,
  }
  bot_dir = os.path.join(ROOT_DIR, 'swarming_bot')
  content, version = bot_archive.get_swarming_bot_zip(
      bot_dir, host, utils.get_app_version(), additionals,
      local_config.settings())
  logging.info('generated bot code %s; %d bytes with bot_config.py rev: %s',
               version, len(content), bot_config_rev)
  cache_swarming_bot_zip(version, content, bot_config_rev)
  return content
def get_cached_swarming_bot_zip(version):
  """Returns the bot contents if its been cached, or None if missing.

  Returns:
    (content, bot_config_rev), or (None, None) on any cache miss or
    integrity failure.
  """
  # see cache_swarming_bot_zip for how the "meta" entry is set
  meta = bot_memcache_get(version, 'meta').get_result()
  if meta is None:
    logging.info('memcache did not include metadata for version %s', version)
    return None, None
  num_parts, true_sig = meta.split(':')

  # Get everything asynchronously. If something's missing, the hash will be
  # wrong so no need to check that we got something from each call.
  futures = [bot_memcache_get(version, 'content', p)
             for p in range(int(num_parts))]
  content = ''
  missing = 0
  for idx, f in enumerate(futures):
    chunk = f.get_result()
    if chunk is None:
      logging.debug(
          'bot code %s was missing chunk %d/%d', version, idx, len(futures))
      missing += 1
    else:
      content += chunk
  if missing:
    logging.warning(
        'bot code %s was missing %d/%d chunks', version, missing, len(futures))
    return None, None

  # Verify integrity against the SHA-256 stored in the meta entry.
  h = hashlib.sha256()
  h.update(content)
  if h.hexdigest() != true_sig:
    logging.error('bot code %s had signature %s instead of expected %s',
                  version, h.hexdigest(), true_sig)
    return None, None

  bot_config_rev = bot_memcache_get(version, 'bot_config_rev').get_result()
  return content, bot_config_rev
def cache_swarming_bot_zip(version, content, bot_config_rev):
  """Caches the bot code to memcache.

  The zip is split into MAX_MEMCACHED_SIZE_BYTES chunks because a single
  memcache entry cannot reliably hold the whole archive. A 'meta' entry
  records the chunk count and a SHA-256 of the whole content, checked by
  get_cached_swarming_bot_zip on read.
  """
  h = hashlib.sha256()
  h.update(content)
  p = 0
  futures = []
  while len(content) > 0:
    chunk_size = min(MAX_MEMCACHED_SIZE_BYTES, len(content))
    futures.append(bot_memcache_set(content[:chunk_size],
                                    version, 'content', p))
    content = content[chunk_size:]
    p += 1
  for f in futures:
    f.check_success()
  # Written after all chunks succeed, so readers never see metadata that
  # points at missing chunks.
  meta = "%s:%s" % (p, h.hexdigest())
  bot_memcache_set(meta, version, 'meta').check_success()
  bot_memcache_set(bot_config_rev, version, 'bot_config_rev').check_success()
  logging.info(
      'bot %s with sig %s with bot_config rev: %s saved in memcached in %d '
      'chunks', version, h.hexdigest(), bot_config_rev, p)
def bot_memcache_get(version, desc, part=None):
  """Mockable async memcache getter."""
  key = bot_key(version, desc, part)
  return ndb.get_context().memcache_get(key, namespace='bot_code')
def bot_memcache_set(value, version, desc, part=None):
  """Mockable async memcache setter."""
  key = bot_key(version, desc, part)
  ctx = ndb.get_context()
  return ctx.memcache_set(key, value, namespace='bot_code')
def bot_key(version, desc, part=None):
  """Returns a memcache key for bot entries.

  The key is 'code-<version>-<desc>' with an optional '-<part>' suffix for
  chunked entries.
  """
  suffix = '' if part is None else '-%d' % part
  return 'code-%s-%s%s' % (version, desc, suffix)
### Bootstrap token.
class BootstrapToken(auth.TokenKind):
  # Token kind authorizing bot bootstrap; see generate_bootstrap_token below.
  expiration_sec = 3600  # tokens are valid for one hour
  secret_key = auth.SecretKey('bot_bootstrap_token')
  version = 1
def generate_bootstrap_token():
  """Returns a token that authenticates calls to bot bootstrap endpoints.

  The authenticated bootstrap workflow looks like this:
  1. An admin visit Swarming server root page and copy-pastes URL to
     bootstrap.py that has a '?tok=...' parameter with the bootstrap token,
     generated by this function.
  2. /bootstrap verifies the token and serves bootstrap.py, with same token
     embedded into it.
  3. The modified bootstrap.py is executed on the bot. It fetches bot code
     from /bot_code, passing it the bootstrap token again.
  4. /bot_code verifies the token and serves the bot code zip archive.

  This function assumes the caller is already authorized.

  Returns:
    An HMAC-signed token string (see BootstrapToken).
  """
  # The embedded payload is mostly FYI. The important expiration time is added
  # by BootstrapToken already.
  return BootstrapToken.generate(message=None, embedded={
      'for': auth.get_current_identity().to_bytes(),
  })
def validate_bootstrap_token(tok):
  """Returns a token payload if the token is valid or None if not.

  The token is valid if its HMAC signature is correct and it hasn't expired yet.
  Doesn't recheck ACLs. Logs errors.
  """
  try:
    return BootstrapToken.validate(tok, message=None)
  except auth.InvalidTokenError as exc:
    # Invalid or expired tokens are expected in the wild; log, don't raise.
    logging.warning('Failed to validate bootstrap token: %s', exc)
    return None
### Private code
def _validate_python(content):
"""Returns True if content is valid python script."""
try:
ast.parse(content)
except (SyntaxError, TypeError):
return False
return True
def _get_signature(host):
  """Returns a SHA-256 hex digest keyed on (host, deployed app version).

  Used as a memcache key suffix so each deployment/host pair has its own
  cached bot version.
  """
  # CURRENT_VERSION_ID is unique per appcfg.py upload so it can be trusted.
  # NOTE(review): str concatenation fed to sha256 implies Python 2
  # (str == bytes); this line needs .encode() under Python 3.
  return hashlib.sha256(host + os.environ['CURRENT_VERSION_ID']).hexdigest()
## Config validators


@config.validation.self_rule('regex:scripts/.+\\.py')
def _validate_scripts(content, ctx):
  """luci-config validator: every scripts/*.py file must parse as Python."""
  try:
    ast.parse(content)
  except (SyntaxError, TypeError) as e:
    # Report through the validation context instead of raising.
    ctx.error('invalid %s: %s' % (ctx.path, e))
|
luci/luci-py
|
appengine/swarming/server/bot_code.py
|
Python
|
apache-2.0
| 10,315
|
[
"VisIt"
] |
780fe4d7c669b68082a7da75c14d3742273e45ada5f6fc8632714c3a0531eed5
|
# Load the benzene GPAW calculation and its Wannier functions, then plot the
# projection weight of the Wannier functions onto each eigenstate.
from ase.dft import Wannier
from gpaw import restart

atoms, calc = restart('benzene.gpw', txt=None)
# 18 Wannier functions with the lowest 15 states kept fixed; rotation matrix
# is read back from 'wan18.pickle'.
wan = Wannier(nwannier=18, calc=calc, fixedstates=15, file='wan18.pickle')

import pylab as pl
# Total Wannier weight on each eigenstate, for the first k-point.
weight_n = pl.sum(abs(wan.V_knw[0])**2, 1)
N = len(weight_n)
F = wan.fixedstates_k[0]
pl.figure(1, figsize=(12, 4))
pl.bar(range(1, N+1), weight_n, width=0.65, bottom=0,
       color='k', edgecolor='k', linewidth=None,
       align='center', orientation='vertical')
# Dashed line marks the boundary between fixed and optimized states.
pl.plot([F+.5, F+.5], [0, 1], 'k--')
pl.axis(xmin=.32, xmax=N+1.33, ymin=0, ymax=1)
pl.xlabel('Eigenstate')
pl.ylabel('Projection of wannier functions')
pl.savefig('spectral_weight.png')
pl.show()
|
grhawk/ASE
|
tools/doc/tutorials/wannier/plot_spectral_weight.py
|
Python
|
gpl-2.0
| 660
|
[
"ASE",
"GPAW"
] |
265b57390d62887b6e3d41971a9be34bdda2fdc7b367aee158e29715007d1608
|
# f90wrap: F90 to Python interface generator with derived type support
#
# Copyright James Kermode 2011-2018
#
# This file is part of f90wrap
# For the latest version see github.com/jameskermode/f90wrap
#
# f90wrap is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# f90wrap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with f90wrap. If not, see <http://www.gnu.org/licenses/>.
#
# If you would like to license the source code under different terms,
# please contact James Kermode, james.kermode@gmail.com
# Originally based on:
# f90doc - automatic documentation generator for Fortran 90
# Copyright (C) 2004 Ian Rutt
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import logging
import string
import sys
import os
import re
from f90wrap.fortran import (Fortran, Root, Program, Module,
Procedure, Subroutine, Function, Interface,
Prototype, Declaration,
Argument, Element, Type,
fix_argument_attributes,
LowerCaseConverter,
RepeatedInterfaceCollapser,
Binding,)
log = logging.getLogger(__name__)
# Define some regular expressions.
# All patterns are raw strings so that sequences such as \s, \w and \( are
# regex escapes rather than (deprecated) Python string escapes; non-raw
# versions raise SyntaxWarning/DeprecationWarning on modern Python.
module = re.compile(r'^module', re.IGNORECASE)
module_end = re.compile(r'^end\s*module|end$', re.IGNORECASE)
program = re.compile(r'^program', re.IGNORECASE)
program_end = re.compile(r'^end\s*program|end$', re.IGNORECASE)
attribs = r'allocatable|pointer|save|dimension *\(.*?\)|parameter|target|public|private|extends *\(.*?\)'  # jrk33 added target
type_re = re.compile(r'^type((,\s*(' + attribs + r')\s*)*)(::)?\s*(?!\()', re.IGNORECASE)
type_end = re.compile(r'^end\s*type|end$', re.IGNORECASE)
dummy_types_re = re.compile(r'recursive|pure|elemental', re.IGNORECASE)
prefixes = r'elemental|impure|module|non_recursive|pure|recursive'
types = r'double precision|(real\s*(\(.*?\))?)|(complex\s*(\(.*?\))?)|(integer\s*(\(.*?\))?)|(logical)|(character\s*(\(.*?\))?)|(type\s*\().*?(\))|(class\s*\().*?(\))'
a_attribs = r'allocatable|pointer|save|dimension\(.*?\)|intent\(.*?\)|optional|target|public|private'
types_re = re.compile(types, re.IGNORECASE)
quoted = re.compile(r'(\".*?\")|(\'.*?\')')  # A quoted expression
comment = re.compile(r'!.*')  # A comment
whitespace = re.compile(r'^\s*')  # Initial whitespace
c_ret = re.compile(r'\r')
iface = re.compile(r'^interface', re.IGNORECASE)
iface_end = re.compile(r'^end\s*interface|end$', re.IGNORECASE)
subt = re.compile(r'^((' + prefixes + r')\s+)*subroutine', re.IGNORECASE)
subt_end = re.compile(r'^end\s*subroutine\s*(\w*)|end$', re.IGNORECASE)
funct = re.compile(r'^((' + types + '|' + prefixes + r')\s+)*function', re.IGNORECASE)
# funct = re.compile('^function',re.IGNORECASE)
funct_end = re.compile(r'^end\s*function\s*(\w*)|end$', re.IGNORECASE)
prototype = re.compile(r'^module procedure\s*(::)?\s*([a-zA-Z0-9_,\s]*)', re.IGNORECASE)
binding_types = r'procedure|generic|final'
binding = re.compile(
    r'^(' + binding_types + r')' +
    r'\s*((,([^:]*))?(::))?' +
    r'\s*(.*)',
    re.IGNORECASE
)
contains = re.compile(r'^contains', re.IGNORECASE)
uses = re.compile(r'^use\s+', re.IGNORECASE)
only = re.compile(r'only\s*:\s*', re.IGNORECASE)
decl = re.compile(r'^(' + types + r')\s*(,\s*(' + attribs + r')\s*)*(::)?\s*\w+(\s*,\s*\w+)*', re.IGNORECASE)
d_colon = re.compile(r'::')
attr_re = re.compile(r'(,\s*(' + attribs + r')\s*)+', re.IGNORECASE)
s_attrib_re = re.compile(attribs, re.IGNORECASE)
decl_a = re.compile(r'^(' + types + r')\s*(,\s*(' + a_attribs + r')\s*)*(::)?\s*\w+(\s*,\s*\w+)*', re.IGNORECASE)
attr_re_a = re.compile(r'(,\s*(' + a_attribs + r')\s*)+', re.IGNORECASE)
s_attrib_re_a = re.compile(a_attribs, re.IGNORECASE)
cont_line = re.compile(r'&')
fdoc_comm = re.compile(r'^!\s*\*FD')
fdoc_comm_mid = re.compile(r'!\s*\*FD')
fdoc_mark = re.compile(r'_FD\s*')
fdoc_rv_mark = re.compile(r'_FDRV\s*')
result_re = re.compile(r'result\s*\((.*?)\)', re.IGNORECASE)
arg_split = re.compile(r'\s*(\w*)\s*(\(.+?\))?\s*(=\s*[\w\.]+\s*)?,?\s*')
size_re = re.compile(r'size\(([^,]+),([^\)]+)\)', re.IGNORECASE)
dimension_re = re.compile(r'^([-0-9.e]+)|((rank\(.*\))|(size\(.*\))|(len\(.*\))|(slen\(.*\)))$', re.IGNORECASE)
alnum = string.ascii_letters + string.digits + '_'
valid_dim_re = re.compile(r'^(([-0-9.e]+)|(size\([_a-zA-Z0-9\+\-\*\/]*\))|(len\(.*\)))$', re.IGNORECASE)
public = re.compile(r'(^public$)|(^public\s*(\w+)\s*$)|(^public\s*::\s*(\w+)(\s*,\s*\w+)*$)', re.IGNORECASE)
private = re.compile(r'(^private$)|(^private\s*(\w+)\s*$)|(^private\s*::\s*(\w+)(\s*,\s*\w+)*$)', re.IGNORECASE)
rmspace = re.compile(r'(\w+)\s+\(', re.IGNORECASE)
def remove_delimited(line, d1, d2):
    """Extract substrings enclosed between delimiters *d1* and *d2*.

    Returns ``(delimited, undelimited)``: the list of enclosed substrings
    (delimiters excluded) and the line with each enclosed substring removed
    but the delimiters themselves kept.  An unterminated region is left in
    place verbatim.
    """
    extracted = []
    kept = []
    buf = []
    inside = False
    for ch in line:
        if inside:
            if ch == d2:
                inside = False
                extracted.append(''.join(buf))
                buf = []
                kept.append(ch)
            else:
                buf.append(ch)
        else:
            if ch == d1:
                inside = True
            kept.append(ch)
    # Unterminated delimiter: restore the swallowed text.
    if inside:
        kept.extend(buf)
    return extracted, ''.join(kept)
def recover_delimited(line, d1, d2, delimited):
    """Re-insert previously extracted substrings after each *d1* delimiter.

    Inverse of :func:`remove_delimited`.  Returns ``(line, leftover)`` where
    *leftover* is whatever part of *delimited* was not consumed.
    """
    if not delimited:
        return line, []
    remaining = list(delimited)
    pos = 0
    while pos < len(line):
        if line[pos] == d1:
            insert = remaining[0]
            line = line[:pos + 1] + insert + line[pos + 1:]
            # Jump past the inserted text and the closing delimiter.
            pos += len(insert) + 1
            remaining = remaining[1:]
        pos += 1
    return line, remaining
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def split_attribs(atr):
    """Split a comma-separated attribute string at top-level commas only.

    Commas nested inside parentheses (e.g. ``dimension(3,4)``) do not split.
    A single leading comma is discarded.  Returns a list of stripped pieces.
    """
    atr = atr.strip()
    if atr.startswith(','):
        atr = atr[1:].strip()
    pieces = []
    current = ''
    depth = 0
    for ch in atr:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if ch == ',' and depth == 0:
            pieces.append(current)
            current = ''
        else:
            current += ch
    # Trailing piece (dropped when empty, matching historical behaviour).
    if current != '':
        pieces.append(current)
    return [piece.strip() for piece in pieces]
# Module-level buffer of doc-comment lines seen before the definition they
# describe; filled and drained by the check_* parser functions below.
hold_doc = None
class F90File(object):
    """Line-oriented reader for a Fortran 90 source file.

    Reads the whole file into memory and serves logical lines via
    :meth:`next`: continuation lines (``&``) are joined, blank lines are
    skipped, trailing ``!`` comments are split off and re-queued as
    ``_COMMENT``/``_FD`` pseudo-lines for the doc-comment parsers, and
    spaces between a name and ``(`` are removed.
    """

    def __init__(self, fname):
        """Read *fname* into ``self.lines`` and initialise position state."""
        self.filename = fname
        self.file = open(fname, 'r', encoding='utf-8', errors='ignore')
        self.lines = self.file.readlines()
        self._lineno = 0
        self._lineno_offset = 0
        self.file.close()
        self.dquotes = []
        self.squotes = []

    @property
    def lineno(self):
        """Current 0-based line number, adjusted when a comment was split off."""
        return self._lineno + self._lineno_offset

    def next(self):
        """Return the next processed logical line, or None at end of file."""
        cline = ''
        while (cline == '' and len(self.lines) != 0):
            cline = self.lines[0].strip()
            while (cline == '' and len(self.lines) != 1):  # issue105 - rm empty lines
                self.lines = self.lines[1:]
                cline = self.lines[0].strip()
            if cline.find('_FD') == 1:
                break
            # jrk33 - join lines before removing delimiters
            # Join together continuation lines
            FD_index = cline.find('_FD')
            com2_index = cline.find('_COMMENT')
            if (FD_index == 0 or com2_index == 0):
                pass
            else:
                cont_index = cline.find('&')
                try:
                    cont2 = self.lines[1].strip()
                    while (cont2 == '' and len(self.lines) != 2):  # issue105 - rm empty lines
                        self.lines[1:] = self.lines[2:]
                        cont2 = self.lines[1].strip()
                    if cont2.startswith('&'):
                        cont2_index = 0
                    else:
                        cont2_index = -1
                except:
                    cont2_index = -1
                comm_index = cline.find('!')
                # Keep joining while this line ends with '&' (outside a
                # comment) or the following line starts with '&'.
                while (cont_index != -1 and (comm_index == -1 or comm_index > cont_index)) or \
                        (cont2_index != -1):
                    cont2 = self.lines[1].strip()
                    while (cont2 == '' and len(self.lines) != 2):  # issue105 - rm empty lines
                        self.lines[1:] = self.lines[2:]
                        cont2 = self.lines[1].strip()
                    if cont2.startswith('&'):
                        cont2 = cont2[1:].strip()
                    # Skip interleaved comments starting with `!`
                    if cont_index != -1 and not cont2.startswith('!'):
                        cont = cline[:cont_index].strip()
                    else:
                        cont = cline.strip()
                    if not cont2.startswith('!'):
                        cont = cont + cont2
                    self.lines = [cont] + self.lines[2:]
                    self._lineno = self._lineno + 1
                    cline = self.lines[0].strip()
                    while (cline == '' and len(self.lines) != 1):  # issue105 - rm empty lines
                        self.lines = self.lines[1:]
                        cline = self.lines[0].strip()
                    cont_index = cline.find('&')
                    try:
                        cont2 = self.lines[1].strip()
                        while (cont2 == '' and len(self.lines) != 2):  # issue105 - rm empty lines
                            self.lines[1:] = self.lines[2:]
                            cont2 = self.lines[1].strip()
                        if cont2.startswith('&'):
                            cont2_index = 0
                        else:
                            cont2_index = -1
                    except:
                        cont2_index = -1
                    comm_index = cline.find('!')
            # split by '!', if necessary
            comm_index = cline.find('!')
            if comm_index != -1:
                self.lines = [cline[:comm_index], cline[comm_index:]] + self.lines[1:]
                cline = self.lines[0].strip()
                # jrk33 - changed comment mark from '!*FD' to '!%'
                if self.lines[1].find('!%') != -1:
                    self.lines = [self.lines[0]] + ['_FD' + self.lines[1][2:]] + self.lines[2:]
                    self._lineno_offset = 1
                else:
                    self.lines = [self.lines[0]] + ['_COMMENT' + self.lines[1][1:]] + self.lines[2:]
                    self._lineno_offset = 1
            else:
                self._lineno_offset = 0
            self._lineno = self._lineno + 1
            self.lines = self.lines[1:]
        # Remove space between a name and an opening parenthesis.
        cline = rmspace.sub(r'\1(', cline)
        if cline == '':
            return None
        else:
            return cline
def check_uses(cline, file):
    """Parse a ``use <module>`` statement.

    Returns ``[module_name, next_line]`` on a match, else ``[None, cline]``.
    """
    if uses.match(cline) is None:
        return [None, cline]
    rest = uses.sub('', cline).strip()
    module_name = re.match(r"\w+", rest).group()
    return [module_name, file.next()]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_doc(cline, file):
    """Parse a ``_FD`` doc-comment pseudo-line.

    Returns ``[doc_text, next_line]`` on a match, else ``[None, cline]``.
    """
    if not (cline and fdoc_mark.match(cline)):
        return [None, cline]
    doc = fdoc_mark.sub('', cline).rstrip()
    return [doc, file.next()]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_doc_rv(cline, file):
    """Parse a ``_FDRV`` return-value doc-comment pseudo-line.

    Returns ``[doc_text, next_line]`` on a match, else ``[None, cline]``.
    """
    if cline is None or fdoc_rv_mark.match(cline) is None:
        return [None, cline]
    doc = fdoc_rv_mark.sub('', cline).rstrip()
    return [doc, file.next()]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_cont(cline, file):
cl = cline
if re.match(contains, cl) != None:
cl = file.next()
return ['yes', cl]
else:
return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_program(cl, file):
    """Parse a ``program ... end program`` block starting at line *cl*.

    Collects use statements, doc comments, and contained subroutines and
    functions.  Returns ``[Program, next_line]`` on success, else
    ``[None, cl]``.
    """
    global doc_plugin_module
    global hold_doc
    out = Program()
    cont = 0
    if re.match(program, cl) != None:
        # Get program name
        cl = program.sub('', cl)
        out.name = re.search(re.compile('\w+'), cl).group().strip()
        if out.name == '':
            out.name = '<Unnamed>'
        out.filename = file.filename
        out.lineno = file.lineno
        # Get next line, and check each possibility in turn
        cl = file.next()
        while re.match(program_end, cl) == None:
            # contains statement
            check = check_cont(cl, file)
            if check[0] != None:
                cont = 1
                cl = check[1]
                continue
            if cont == 0:
                # use statements
                check = check_uses(cl, file)
                if check[0] != None:
                    out.uses.append(check[0])
                    cl = check[1]
                    continue
                # Doc comment
                check = check_doc(cl, file)
                if check[0] != None:
                    out.doc.append(check[0])
                    cl = check[1]
                    continue
            else:
                # jrk33 - hold doc comment relating to next subrt or funct
                check = check_doc(cl, file)
                if check[0] != None:
                    if hold_doc == None:
                        hold_doc = [check[0]]
                    else:
                        hold_doc.append(check[0])
                    cl = check[1]
                    continue
                # Subroutine definition
                check = check_subt(cl, file)
                if check[0] != None:
                    log.debug(' program subroutine ' + check[0].name)
                    out.procedures.append(check[0])
                    cl = check[1]
                    continue
                # Function definition
                check = check_funct(cl, file)
                if check[0] != None:
                    log.debug(' program function ' + check[0].name)
                    out.procedures.append(check[0])
                    cl = check[1]
                    continue
            # If no joy, get next line
            cl = file.next()
        cl = file.next()
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_module(cl, file):
    """Parse a ``module ... end module`` block starting at line *cl*.

    Collects uses, doc comments, interfaces, types, module variables,
    access specifiers, and (after ``contains``) procedures.  Returns
    ``[Module, next_line]`` on success, else ``[None, cl]``.
    """
    global doc_plugin_module
    global hold_doc
    out = Module()
    cont = 0
    if re.match(module, cl) != None:
        out.filename = file.filename
        out.lineno = file.lineno
        # jrk33 - if we're holding a doc comment from before
        # subroutine definition, spit it out now
        if hold_doc is not None:
            for line in hold_doc:
                out.doc.append(line)
            hold_doc = None
        # Get module name
        cl = module.sub('', cl)
        out.name = re.search(re.compile('\w+'), cl).group()
        # Get next line, and check each possibility in turn
        cl = file.next()
        while re.match(module_end, cl) == None:
            # contains statement
            check = check_cont(cl, file)
            if check[0] != None:
                cont = 1
                cl = check[1]
                continue
            if cont == 0:
                # use statements
                check = check_uses(cl, file)
                if check[0] != None:
                    out.uses.append(check[0])
                    cl = check[1]
                    continue
                # Doc comment
                check = check_doc(cl, file)
                if check[0] != None:
                    if hold_doc == None:
                        hold_doc = [check[0]]
                    else:
                        hold_doc.append(check[0])
                    cl = check[1]
                    continue
                # jrk33 - Interface definition
                check = check_interface(cl, file)
                if check[0] != None:
                    log.debug(' interface ' + check[0].name)
                    check[0].mod_name = out.name
                    out.interfaces.append(check[0])
                    cl = check[1]
                    continue
                # Type definition
                check = check_type(cl, file)
                if check[0] != None:
                    log.debug(' type ' + check[0].name)
                    check[0].mod_name = out.name
                    out.types.append(check[0])
                    cl = check[1]
                    continue
                # Module variable
                check = check_decl(cl, file)
                if check[0] != None:
                    for el in check[0]:
                        out.elements.append(el)
                    cl = check[1]
                    continue
                # public and private access specifiers
                m = public.match(cl)
                if m is not None:
                    line = m.group()
                    if line.lower() == 'public':
                        log.info('marking module %s as default public' % out.name)
                        out.default_access = 'public'
                    else:
                        line = line.lower().replace('public', '')
                        line = line.replace('::', '')
                        line = line.strip()
                        out.public_symbols.extend([field.strip() for field in line.split(',')])
                m = private.match(cl)
                if m is not None:
                    line = m.group()
                    if line.lower() == 'private':
                        log.info('marking module %s as default private' % out.name)
                        out.default_access = 'private'
                    else:
                        line = line.replace('private', '')
                        line = line.replace('::', '')
                        line = line.strip()
                        out.private_symbols.extend([field.strip() for field in line.split(',')])
            else:
                # jrk33 - hold doc comment relating to next subrt or funct
                check = check_doc(cl, file)
                if check[0] != None:
                    if hold_doc == None:
                        hold_doc = [check[0]]
                    else:
                        hold_doc.append(check[0])
                    cl = check[1]
                    continue
                # Subroutine definition
                check = check_subt(cl, file)
                if check[0] != None:
                    log.debug(' module subroutine ' + check[0].name)
                    check[0].mod_name = out.name
                    out.procedures.append(check[0])
                    cl = check[1]
                    continue
                # Function definition
                check = check_funct(cl, file)
                if check[0] != None:
                    log.debug(' module function ' + check[0].name)
                    check[0].mod_name = out.name
                    out.procedures.append(check[0])
                    cl = check[1]
                    continue
            # If no joy, get next line
            cl = file.next()
        cl = file.next()
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_subt(cl, file, grab_hold_doc=True):
    """Parse a ``subroutine ... end subroutine`` block starting at *cl*.

    Extracts the name, attributes (recursive/pure/elemental), the argument
    list (including implicitly typed arguments), doc comments and interface
    sections.  When *grab_hold_doc* is true, any doc comment held in the
    module-level ``hold_doc`` buffer is attached to the result.  Returns
    ``[Subroutine, next_line]`` on success, else ``[None, cl]``.
    """
    global doc_plugin_module
    global hold_doc
    out = Subroutine()
    if re.match(subt, cl) != None:
        out.filename = file.filename
        out.lineno = file.lineno
        # Check if recursive, elemental or pure
        m = re.match(dummy_types_re, cl)
        if m != None:
            out.attributes.append(m.group())
        # Get subt name
        cl = subt.sub('', cl)
        out.name = re.search(re.compile('\w+'), cl).group()
        log.debug(' module subroutine checking ' + out.name)
        # Test in principle whether we can have a 'do not wrap' list
        if out.name.lower() == 'debugtype_stop_if':
            return [None, cl]
        # Check to see if there are any arguments
        has_args = 0
        if re.search(r'\(.+', cl) != None:
            has_args = 1
        in_block_doc = False
        had_block_doc = False
        # get argument list
        if has_args:
            cl = cl[:cl.find(')', 0)+1]
            cl = re.sub('\w+', '', cl, count=1)
            argl = re.split('[\W]+', cl)
            del (argl[0])
            del (argl[len(argl) - 1])
            while cl.strip() == '' or re.search('&', cl) != None:
                cl = file.next()
                if cl.startswith('_COMMENT'):
                    cl = file.next()
                if cl.strip() == '': continue
                arglt = re.split('[\W]+', cl)
                del (arglt[len(arglt) - 1])
                for a in arglt:
                    argl.append(a)
        else:
            argl = []
        argl = list(map(lambda s: s.lower(), argl))
        # Get next line, and check each possibility in turn
        cl = file.next()
        cont = 0
        subroutine_lines = []
        while True:
            # Use statement
            # #check=check_uses(cl,file)
            # #if check[0]!=None:
            # #    out.uses.append(check[0])
            # #    cl=check[1]
            # #    continue
            # Look for block comments starting with a line of ======= or -------
            if cl is not None and not in_block_doc and not had_block_doc:
                if cl.startswith('_COMMENT=====') or cl.startswith('_COMMENT-----'):
                    in_block_doc = True
            if cl is not None and in_block_doc:
                if not cl.startswith('_COMMENT'):
                    in_block_doc = False
                    had_block_doc = True
                else:
                    rep = cl.strip().replace('_COMMENT', '')
                    if rep:
                        out.doc.append(rep)
                    cl = file.next()
                    continue
            # contains statement
            check = check_cont(cl, file)
            if check[0] is not None:
                cont = 1
                cl = check[1]
            if cont == 0:
                # Doc comment
                check = check_doc(cl, file)
                if check[0] != None:
                    out.doc.append(check[0])
                    cl = check[1]
                    continue
                if has_args:
                    # Argument
                    check = check_arg(cl, file)
                    if check[0] != None:
                        for a in check[0]:
                            out.arguments.append(a)
                        cl = check[1]
                        continue
                    # Interface section
                    check = check_interface_decl(cl, file)
                    if check[0] != None:
                        for a in check[0].procedures:
                            out.arguments.append(a)
                        cl = check[1]
                        continue
            else:
                while True :
                    # Subroutine definition
                    check = check_subt(cl, file)
                    if check[0] is not None:
                        # Discard contained subroutine
                        cl = check[1]
                        continue
                    # Function definition
                    check = check_funct(cl, file)
                    if check[0] is not None:
                        # Discard contained function
                        cl = check[1]
                        continue
                    break
            m = subt_end.match(cl)
            subroutine_lines.append(cl)
            if m == None:
                cl = file.next()
                continue
            else:
                if doc_plugin_module is not None:
                    extra_doc = doc_plugin_module.doc_plugin(subroutine_lines, out.name, 'subroutine')
                    out.doc.extend(extra_doc)
                break
            # If no joy, get next line
            cl = file.next()
        # Select only first declaration that matches entries
        # in argument list
        if has_args:
            # t_re_str='(^'
            ag_temp = []
            # for a in argl:
            #     t_re_str=t_re_str+a+'$)|(^'
            # t_re_str=t_re_str[:-3]
            # t_re=re.compile(t_re_str,re.IGNORECASE)
            for i in out.arguments:
                if (i.name.lower() in argl and
                        len([a for a in ag_temp if a.name.lower() == i.name.lower()]) == 0):
                    ag_temp.append(i)
            implicit_to_explicit_arguments(argl, ag_temp)
            out.arguments = ag_temp
            out.arguments.sort(key=lambda x: argl.index(x.name.lower()))
        else:
            out.arguments = []
        cl = file.next()
        # jrk33 - if we're holding a doc comment from before
        # subroutine definition, spit it out now
        if grab_hold_doc and hold_doc is not None:
            for line in hold_doc:
                out.doc.append(line)
            hold_doc = None
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def implicit_to_explicit_arguments(argl, ag_temp):
    """Append Argument entries for names in *argl* with no explicit declaration.

    Mutates *ag_temp* in place; types follow the Fortran implicit typing rule
    (see :func:`implicit_type_rule`).
    """
    declared = {a.name.lower() for a in ag_temp}
    for name in set(argl) - declared:
        ag_temp.append(
            Argument(name=name, doc=None, type=implicit_type_rule(name),
                     attributes=None, filename=None, lineno=None))
def implicit_type_rule(var):
    """Fortran implicit typing: names starting with i-n are integer, else real."""
    if var[0] in ('i', 'j', 'k', 'l', 'm', 'n'):
        inferred = 'integer'
    else:
        inferred = 'real'
    log.debug(' implicit type of "%s" inferred from its name as "%s"' % (var, inferred))
    return inferred
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_funct(cl, file, grab_hold_doc=True):
    """Parse a ``function ... end function`` block starting at *cl*.

    Extracts the name, attributes, return value (explicit type, ``result``
    variable, or implicitly typed from the name), arguments, doc comments
    and interface sections.  When *grab_hold_doc* is true, any doc comment
    held in ``hold_doc`` is attached.  Returns ``[Function, next_line]`` on
    success, else ``[None, cl]``.
    """
    global doc_plugin_module
    global hold_doc
    out = Function()
    if re.match(funct, cl) != None:
        out.filename = file.filename
        out.lineno = file.lineno
        out.ret_val.filename = out.filename
        out.ret_val.lineno = out.lineno
        # Check if recursive, elemental or pure
        m = re.search(dummy_types_re, cl)
        if m != None:
            out.attributes.append(m.group())
            cl = dummy_types_re.sub('', cl)
        # Get return type, if present
        cl = cl.strip()
        if re.match(types_re, cl) != None:
            out.ret_val.type = re.match(types_re, cl).group()
        # jrk33 - Does function header specify alternate name of
        # return variable?
        ret_var = None
        m = re.search(result_re, cl)
        if m != None:
            ret_var = m.group(1)
            cl = result_re.sub('', cl)
        # Get func name
        cl = funct.sub('', cl)
        out.name = re.search(re.compile('\w+'), cl).group()
        log.debug(' module function checking ' + out.name)
        # Default name of return value is function name
        out.ret_val.name = out.name
        # If return type not present, infer type from function name
        if out.ret_val.type == '':
            out.ret_val.type = implicit_type_rule(out.name)
        # Check to see if there are any arguments
        # Find "(" followed by anything else than ")"
        if re.search(r'\([^\)]+', cl) != None:
            has_args = 1
        else:
            has_args = 0
        if has_args:
            # get argument list
            # substitue 'consecutive words' by '' in cl, at most 1 time
            cl = re.sub('\w+', '', cl, count=1)
            argl = re.split('[\W]+', cl)
            del (argl[0])
            del (argl[len(argl) - 1])
            while cl.strip() == '' or re.search('&', cl) != None:
                cl = file.next()
                if cl.startswith('_COMMENT'):
                    cl = file.next()
                if cl.strip() == '':
                    continue
                arglt = re.split('[\W]+', cl)
                del (arglt[len(arglt) - 1])
                for a in arglt:
                    argl.append(a.lower())
        else:
            argl = []
        argl = list(map(lambda s: s.lower(), argl))
        # Get next line, and check each possibility in turn
        in_block_doc = False
        had_block_doc = False
        cl = file.next()
        subroutine_lines = []
        while True:
            # Use statement
            # #check=check_uses(cl,file)
            # #if check[0]!=None:
            # #    out.uses.append(check[0])
            # #    cl=check[1]
            # #    continue
            # Look for block comments starting with a line of ======= or -------
            if cl is not None and not in_block_doc and not had_block_doc:
                if cl.startswith('_COMMENT=====') or cl.startswith('_COMMENT-----'):
                    in_block_doc = True
            if cl is not None and in_block_doc:
                if not cl.startswith('_COMMENT'):
                    in_block_doc = False
                    had_block_doc = True
                else:
                    rep = cl.strip().replace('_COMMENT', '')
                    if rep:
                        out.doc.append(rep)
                    cl = file.next()
                    continue
            # Doc comment - return value
            check = check_doc_rv(cl, file)
            if check[0] != None:
                out.ret_val_doc.append(check[0])
                cl = check[1]
                continue
            # Doc comment
            check = check_doc(cl, file)
            if check[0] != None:
                out.doc.append(check[0])
                cl = check[1]
                continue
            # Interface section
            check = check_interface_decl(cl, file)
            if check[0] != None:
                for a in check[0].procedures:
                    out.arguments.append(a)
                cl = check[1]
                continue
            # Argument
            check = check_arg(cl, file)
            if check[0] != None:
                for a in check[0]:
                    out.arguments.append(a)
                cl = check[1]
                continue
            m = re.match(funct_end, cl)
            subroutine_lines.append(cl)
            if m == None:
                cl = file.next()
                continue
            else:
                if doc_plugin_module is not None:
                    extra_doc = doc_plugin_module.doc_plugin(subroutine_lines, out.name, 'function')
                    out.doc.extend(extra_doc)
                break
            cl = file.next()
        # Select only first declaration that matches entries
        # in argument list
        ag_temp = []
        # if has_args:
        #     t_re_str='(^'
        #     for a in argl:
        #         t_re_str=t_re_str+a+'$)|(^'
        #     t_re_str=t_re_str[:-3]
        #     t_re=re.compile(t_re_str,re.IGNORECASE)
        name_re = re.compile(out.name, re.IGNORECASE)
        for i in out.arguments:
            if has_args and i.name.lower() in argl and \
                    len([a for a in ag_temp if a.name.lower() == i.name.lower()]) == 0:
                ag_temp.append(i)
            if re.search(name_re, i.name) != None:
                out.ret_val = i
            if ret_var != None and i.name.lower().strip() == ret_var.lower().strip():
                out.ret_val = i
        implicit_to_explicit_arguments(argl, ag_temp)
        out.arguments = ag_temp
        out.arguments.sort(key=lambda x: argl.index(x.name.lower()))
        cl = file.next()
        # jrk33 - if we're holding a doc comment from before
        # subroutine definition, spit it out now
        if grab_hold_doc and hold_doc is not None:
            for line in hold_doc:
                out.doc.append(line)
            hold_doc = None
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_type(cl, file):
    """Parse a derived-type definition (``type ... end type``) starting at *cl*.

    Collects type attributes, element declarations (tagged with the current
    public/private access), and — after ``contains`` — type-bound procedure
    bindings.  Returns ``[Type, next_line]`` on success, else ``[None, cl]``.
    """
    # global hold_doc
    out = Type()
    m = re.match(type_re, cl)
    current_access = None
    cont = 0
    if m is not None:
        out.filename = file.filename
        out.lineno = file.lineno
        # jrk33 - see if it's a global variable of this type.
        # if so, do nothing - it will be found by check_decl
        if decl.match(cl) != None:
            return [None, cl]
        # if hold_doc != None:
        # for line in hold_doc:
        # out.doc.append(line)
        # hold_doc = None
        # Get type name
        cl = type_re.sub('', cl)
        # Check if there are any type attributes
        out.attributes = []
        if m.group(1):
            out.attributes = split_attribs(m.group(1))
        out.name = re.search(re.compile('\w+'), cl).group()
        log.info('parser reading type %s' % out.name)
        # Get next line, and check each possibility in turn
        cl = file.next()
        while re.match(type_end, cl) == None:
            # contains statement
            check = check_cont(cl, file)
            if check[0] != None:
                log.debug('parser reading type %s bound procedures', out.name)
                cont = 1
                cl = check[1]
                continue
            if cont == 0:
                check = check_doc(cl, file)
                if check[0] != None:
                    out.doc.append(check[0])
                    cl = check[1]
                    continue
                check = check_decl(cl, file)
                if check[0] != None:
                    for a in check[0]:
                        if current_access is not None:
                            a.attributes.append(current_access)
                        out.elements.append(a)
                    cl = check[1]
                    continue
            else:
                check = check_binding(cl, file)
                if check[0] != None:
                    out.bindings.extend(check[0])
                    cl = check[1]
                    continue
            if cl.lower() == 'public':
                current_access = 'public'
            elif cl.lower() == 'private':
                current_access = 'private'
            cl = file.next()
        cl = file.next()
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_interface(cl, file):
    """Parse a named ``interface ... end interface`` block starting at *cl*.

    Collects doc comments and ``module procedure`` prototypes; any held
    ``hold_doc`` comment is attached.  Returns ``[Interface, next_line]``
    on success, else ``[None, cl]``.
    """
    global hold_doc
    out = Interface()
    if re.match(iface, cl) != None:
        out.filename = file.filename
        out.lineno = file.lineno
        cl = iface.sub('', cl)
        out.name = cl.strip()
        # if out.name == '':
        #     return [None, cl]
        if hold_doc is not None:
            for line in hold_doc:
                out.doc.append(line)
            hold_doc = None
        cl = file.next()
        while re.match(iface_end, cl) == None:
            check = check_doc(cl, file)
            if check[0] != None:
                out.doc.append(check[0])
                cl = check[1]
                continue
            check = check_prototype(cl, file)
            if check[0] != None:
                for a in check[0]:
                    out.procedures.append(a)
                cl = check[1]
                continue
            cl = file.next()
        cl = file.next()
        out.lineno = slice(out.lineno, file.lineno - 1)
        return [out, cl]
    else:
        return [None, cl]
def check_interface_decl(cl, file):
    """Parse an interface block used as a dummy-procedure declaration.

    Collects full subroutine/function declarations inside an
    ``interface ... end interface`` section.  Returns
    ``[Interface, next_line]`` on success, else ``[None, cl]``.
    """
    global doc_plugin_module
    out = Interface()
    if cl is None or iface.match(cl) is None:
        return [None, cl]
    out.filename = file.filename
    out.lineno = file.lineno
    cl = file.next()
    while iface_end.match(cl) is None:
        # Subroutine declaration
        sub_check = check_subt(cl, file)
        if sub_check[0] is not None:
            out.procedures.append(sub_check[0])
            cl = sub_check[1]
            continue
        # Function declaration
        fn_check = check_funct(cl, file)
        if fn_check[0] is not None:
            out.procedures.append(fn_check[0])
            cl = fn_check[1]
            continue
        cl = file.next()
    cl = file.next()
    out.lineno = slice(out.lineno, file.lineno - 1)
    return [out, cl]
def check_prototype(cl, file):
    """Parse a ``module procedure`` statement inside an interface block.

    Returns ``[[Prototype, ...], next_line]`` on a match, else ``[None, cl]``.
    """
    m = prototype.match(cl)
    if m is None:
        return [None, cl]
    names = [s.strip().lower() for s in m.group(2).split(',')]
    protos = [Prototype(name=nm, lineno=file.lineno, filename=file.filename)
              for nm in names]
    return [protos, file.next()]
def check_binding(cl, file):
    """Parse a type-bound procedure statement (procedure/generic/final).

    A ``generic`` line yields one Binding with one Prototype per target;
    other kinds yield one Binding per comma-separated entry, defaulting the
    target name to the binding name when no ``=>`` is given.  Returns
    ``[[Binding, ...], next_line]`` on a match, else ``[None, cl]``.
    """
    m = binding.match(cl)
    if m is None:
        return [None, cl]
    binding_kind = m.group(1).strip().lower()
    raw_attrs = m.group(4)
    spec = m.group(6)
    # Preserve the original falsy value (None/'') when no attributes given.
    attrs = [a.strip().lower() for a in raw_attrs.split(',')] if raw_attrs else raw_attrs
    out = []
    if binding_kind == 'generic':
        name, targets = spec.split('=>')
        name = name.strip().lower()
        log.debug('found generic binding %s => %s', name, targets)
        procedures = [
            Prototype(
                name=t.strip().lower(),
                lineno=file.lineno,
                filename=file.filename
            )
            for t in targets.split(',')
        ]
        out.append(Binding(
            name=name,
            lineno=file.lineno,
            filename=file.filename,
            type=binding_kind,
            attributes=attrs,
            procedures=procedures,
        ))
    else:
        for entry in spec.split(','):
            words = [w.strip().lower() for w in entry.split('=>')]
            name = words[0]
            target = words[1] if len(words) > 1 else name
            log.debug('found %s binding %s => %s', binding_kind, name, target)
            out.append(Binding(
                name=name,
                lineno=file.lineno,
                filename=file.filename,
                type=binding_kind,
                attributes=attrs,
                procedures=[
                    Prototype(
                        name=target,
                        lineno=file.lineno,
                        filename=file.filename,
                    ),
                ],
            ))
    return [out, file.next()]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_decl(cl, file):
    """Parse a variable declaration line inside a module or type body.

    Handles attribute lists, ``::`` separators, initial values and
    per-variable array dimensions; trailing doc comments are attached to
    every declared element.  Returns ``[[Element, ...], next_line]`` on a
    match, else ``[None, cl]``.
    """
    out = []
    if re.match(decl, cl) != None:
        filename = file.filename
        lineno = file.lineno
        tp = re.match(types_re, cl).group()
        atr = re.search(attr_re, cl)
        if atr != None:
            atrl = s_attrib_re.findall(atr.group())
            for j in range(len(atrl)):
                atrl[j] = atrl[j].rstrip()
        else:
            atrl = []
        m = re.search(d_colon, cl)
        if m is not None:
            names = cl[m.end():]
        else:
            names = types_re.sub('', cl)
        # old line - doesn't handle array constants
        # nl=re.split(r'\s*,\s*',names)
        nl = split_attribs(names)
        alist = []
        for j in range(len(atrl)):
            alist.append(atrl[j])
        cl = file.next()
        check = check_doc(cl, file)
        dc = []
        while check[0] != None:
            # Doc comment
            dc.append(check[0])
            cl = check[1]
            check = check_doc(cl, file)
        cl = check[1]
        for i in range(len(nl)):
            nl[i] = nl[i].strip()
            nlv = re.split(r'\s*=\s*', nl[i])
            names, sizes = splitnames(nlv[0])
            temp = Element(name=names[0], type=tp, doc=dc, attributes=alist[:],
                           filename=filename, lineno=lineno)
            if len(nlv) == 2:
                temp.value = nlv[1]
            if sizes[0] != '':
                temp.attributes.append('dimension' + sizes[0])
            out.append(temp)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def splitnames(names):
    """Split a declaration name list into names and array-size strings.

    ``'a(3,3),b'`` -> ``(['a', 'b'], ['(3,3)', ''])``.  Commas inside
    parentheses belong to the size expression, not the name list.
    """
    nl = []
    sizes = []
    depth = 0
    name = ''
    size = ''
    for ch in names:
        if ch == '(':
            depth += 1
            size += '('
        elif ch == ')':
            depth -= 1
            size += ')'
        elif ch == ',' and depth == 0:
            # Top-level comma: close out the current name/size pair.
            nl.append(name)
            sizes.append(size)
            name = ''
            size = ''
        elif depth == 0:
            name += ch
        else:
            size += ch
    nl.append(name)
    sizes.append(size)
    return nl, sizes
def check_arg(cl, file):
    """Parse a dummy-argument declaration line inside a procedure body.

    Handles attribute lists, array sizes after the variable name, and
    trailing doc comments (attached to every declared argument).  Returns
    ``[[Argument, ...], next_line]`` on a match, else ``[None, cl]``.
    """
    out = []
    if cl and re.match(decl_a, cl) != None:
        filename = file.filename
        lineno = file.lineno
        tp = re.match(types_re, cl).group()
        m = re.search(d_colon, cl)
        if m is not None:
            atr_temp = cl[re.match(types_re, cl).end():m.start()]
            names = cl[m.end():]
        else:
            atr_temp = ''
            # Need to remove ONLY THE FIRST type string (the name may have the type in it)
            names = types_re.sub('', cl, 1)
        atrl = split_attribs(atr_temp)
        # names=cl[re.search(d_colon,cl).end():]
        # # nl=re.split(',',names)
        # # for i in range(len(nl)):
        # # nl[i]=nl[i].strip()
        # jrk33 - added code to cope with array declarations with
        # size after variable name, e.g. matrix(3,3) etc.
        # Remove values
        names = re.sub(r'=.*$', '', names)
        nl, sizes = splitnames(names)
        alist = []
        for j in range(len(atrl)):
            alist.append(atrl[j])
        cl = file.next()
        check = check_doc(cl, file)
        dc = []
        while check[0] != None:
            # Doc comment
            dc.append(check[0])
            cl = check[1]
            check = check_doc(cl, file)
        cl = check[1]
        for i in range(len(nl)):
            nl[i] = nl[i].strip()
            temp = Argument(name=nl[i], doc=dc, type=tp, attributes=alist[:],
                            filename=filename, lineno=lineno)
            # Append dimension if necessary
            if sizes[i] != '':
                temp.attributes.append('dimension' + sizes[i])
            out.append(temp)
        return [out, cl]
    else:
        return [None, cl]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def read_files(args, doc_plugin_filename=None):
    """Parse a list of Fortran source files into a single Root tree.

    args                -- iterable of filenames to parse
    doc_plugin_filename -- optional path to a Python module that is imported
                           (its directory is temporarily prepended to
                           sys.path) and stored in the module-global
                           doc_plugin_module

    Returns the post-processed Root object.
    NOTE(review): mutates the module globals doc_plugin_module and hold_doc.
    """
    global doc_plugin_module
    global hold_doc
    if doc_plugin_filename is not None:
        sys.path.insert(0, os.path.dirname(doc_plugin_filename))
        doc_plugin_module = __import__(os.path.splitext(os.path.basename(doc_plugin_filename))[0])
        # Undo the sys.path manipulation once the plugin is imported.
        sys.path = sys.path[1:]
    else:
        doc_plugin_module = None
    root = Root()
    for fn in args:
        fname = fn
        # Open the filename for reading
        log.debug('processing file ' + fname)
        file = F90File(fname)
        # Get first line
        cline = file.next()
        while cline != None:
            # Each check_* helper returns [parsed_entity_or_None, next_line];
            # on a match we record the entity and continue from next_line.
            # programs
            check = check_program(cline, file)
            if check[0] != None:
                log.debug(' program ' + check[0].name)
                root.programs.append(check[0])
                cline = check[1]
                continue
            # modules
            check = check_module(cline, file)
            if check[0] != None:
                log.debug(' module ' + check[0].name)
                root.modules.append(check[0])
                cline = check[1]
                continue
            # jrk33 - hold doc comment relating to next module, subrt or funct
            check = check_doc(cline, file)
            if check[0] != None:
                if hold_doc == None:
                    hold_doc = [check[0]]
                else:
                    hold_doc.append(check[0])
                cline = check[1]
                continue
            # stand-alone subroutines
            check = check_subt(cline, file)
            if check[0] != None:
                # log.debug(' subroutine ' + check[0].name)
                root.procedures.append(check[0])
                cline = check[1]
                continue
            # stand-alone functions
            check = check_funct(cline, file)
            if check[0] != None:
                # log.debug(' function ' + check[0].name)
                root.procedures.append(check[0])
                cline = check[1]
                continue
            cline = file.next()
    # apply some rules to the parsed tree
    root = fix_argument_attributes(root)
    root = LowerCaseConverter().visit(root)
    root = RepeatedInterfaceCollapser().visit(root)
    return root
|
jameskermode/f90wrap
|
f90wrap/parser.py
|
Python
|
lgpl-3.0
| 46,940
|
[
"VisIt"
] |
c0aafaa8344439ad448dec32773943e147b553961dafed4695bf05b0fde2412c
|
# -*- coding: utf-8 -*-
"""
Functions to estimate specific differential phase (KDP) using various methods.
"""
from __future__ import division, print_function
import numpy as np
from numpy import linalg
from scipy.signal import firwin
from warnings import warn
from .calc_kdp_ray_fir import calc_kdp_ray_fir
# import time
VERSION = '1.6'
# Used by FIR coefficient function (get_fir)
FIR_GS = 150.0
FIR_WIN = 3.0
FIR_ORDER = 20
FIR_GAIN = 1.0
FIR_FREQ = 0.08
FIR_STD = 28.0
KM2M = 1000.0
STD_GATE = 11
def calc_kdp_bringi(dp=None, dz=None, rng=None, thsd=12, nfilter=1,
                    bad=-32768, gs=FIR_GS, window=FIR_WIN, std_gate=STD_GATE):
    """
    Overview
    --------
    This is an old algorithm that uses an FIR filter to process differential
    phase and extract specific differential phase. It works on polarimetric
    radar data. It is based on code provided by V. N. Bringi and Yanting Wang
    of CSU Electrical Engineering. It assumes differential phase has been
    unfolded already. You can send this function either 1D or 2D arrays of
    data. If 2D, it assumes the first index is azimuth so it will loop over
    that, calculating KDP along individual rays.
    Steps
    -----
    1. Standard deviation of differential phase is calculated and used to
    QC the phase data. The stdev calculation uses up to std_gate consecutive
    gates regardless of gate spacing.
    2. Differential phase is filtered using the FIR filter, which has been
    tuned to the number of gates contained within the FIR window. This
    algorithm only works for window / gate spacing = even number.
    3. Specific differential phase is calculated by consulting reflectivity.
    As reflectivity declines progressively more and more gates are needed
    in the window used to fit a line to the filtered phase. Specific
    differential phase is half the slope of that line.
    Reference
    ---------
    Timothy J. Lang, David A. Ahijevych, Stephen W. Nesbitt, Richard E.
    Carbone, Steven A. Rutledge, and Robert Cifelli, 2007: Radar-Observed
    Characteristics of Precipitating Systems during NAME 2004. J. Climate,
    20, 1713–1733. doi: https://dx.doi.org/10.1175/JCLI4082.1
    Arguments
    ---------
    dp = Differential phase (deg, 1D or 2D array)
    dz = Reflectivity (dBZ, 1D or 2D array)
    rng = Range (km, 1D or 2D array -
          use np.meshgrid() first to make rng 2D if needed)
    thsd = Threshold for standard deviation of differential phase, above which
           the data are not considered when filtering or calculating specific
           differential phase. The user can specify a 1D vector of spatially
           varying thresholds instead (i.e., vary by range).
    nfilter = Number of times to apply the FIR filter
    bad = Value for bad/missing data
    gs = Gate spacing of radar (meters)
    window = Changes window over which FIR filter is applied (km). Also affects
             the width of the adaptive KDP calculations.
    std_gate = Number of gates for standard deviation of phase calculation.
               Must be odd or function will just set it to the default value.
    Returns
    -------
    kd_lin = Specific differential phase (deg/km, 1D or 2D array)
    dp_lin = Filtered differential phase (deg, 1D or 2D array)
    sd_lin = Standard deviation of diff. phase (deg, 1D or 2D array)

    NOTE(review): the early failure paths return bare None, while the FIR
    failure path returns (None, None, None) — callers must handle both.
    """
    # Quick check on all vars. Used keywords so order doesn't matter.
    if dp is None or dz is None or rng is None:
        warn('Missing needed variables (dp, dz, and/or rng), failing ...')
        return
    if np.ndim(dp) != np.ndim(dz) or np.ndim(dp) != np.ndim(rng):
        warn('Array sizes don\'t match, failing ...')
        return
    if std_gate % 2 != 1:
        warn('std_gate must be odd, using ' + str(STD_GATE) +
             ' gates as the default window')
        std_gate = STD_GATE
    fir = get_fir(gs=gs, window=window)
    if fir is None:
        print('Fix window/gs to be even, failing ...')
        return None, None, None
    # Following lines ensure right dtype passed to Cython extensions (if used)
    dp = np.array(dp).astype('float32')
    dz = np.array(dz).astype('float32')
    rng = np.array(rng).astype('float32')
    fir['coef'] = np.array(fir['coef']).astype('float64')
    # Broadcast a scalar threshold to the full data shape.
    if not hasattr(thsd, '__len__'):
        thsd = np.zeros_like(dp) + thsd
    # If array is 2D, then it assumes the first index refers to azimuth.
    # Thus it loops over that.
    if np.ndim(dp) == 2:
        kd_lin = np.zeros_like(dp) + bad
        dp_lin = np.zeros_like(dp) + bad
        sd_lin = np.zeros_like(dp) + 100.0
        for ray in np.arange(np.shape(dp)[0]):
            dpl = len(dp[ray])
            kd_lin[ray], dp_lin[ray], sd_lin[ray] = calc_kdp_ray_fir(
                dpl, dp[ray], dz[ray], rng[ray], thsd[ray],
                nfilter, bad, fir['order'], fir['gain'], fir['coef'], std_gate)
            # kd_lin[ray], dp_lin[ray], sd_lin[ray] = \
            # _calc_kdp_ray(dp[ray], dz[ray], rng[ray], thsd=thsd,
            # nfilter=nfilter, bad=bad, fir=fir)
    # Or
    elif np.ndim(dp) == 1:
        kd_lin, dp_lin, sd_lin = calc_kdp_ray_fir(
            len(dp), dp, dz, rng, thsd, nfilter, bad,
            fir['order'], fir['gain'], fir['coef'], std_gate)
        # kd_lin, dp_lin, sd_lin = _calc_kdp_ray(
        # dp, dz, rng, thsd=thsd, fir=fir, nfilter=nfilter, bad=bad)
    else:
        warn('Need 2D or 1D array, failing ...')
        return
    return kd_lin, dp_lin, sd_lin
def get_fir(gs=FIR_GS, window=FIR_WIN):
    """
    Build the FIR filter used to smooth differential phase.

    gs = Gate Spacing (m)
    window = Filter Window (km)
    window divided by gs should be an even number!

    Returns a dict with keys 'order', 'gain', and 'coef', or None when the
    requested window/gate-spacing combination yields an odd filter order.
    """
    fir = {}
    # Number of gates spanned by the filter window; must be even so the
    # window is symmetric about the central gate.
    fir['order'] = np.int32(window * KM2M / gs)
    if fir['order'] % 2 != 0:
        # Bug fix: the old message said 'gs / window', inverting the ratio
        # described in the docstring.
        warn('window / gs must be an even number! Failing ...')
        return
    fir['gain'] = FIR_GAIN
    # Scale the reference cutoff frequency and Gaussian std to the actual
    # filter order, so the response matches the 20th-order design that was
    # tuned for 150-m gate spacing.
    ratio = fir['order'] / FIR_ORDER
    freq = FIR_FREQ / ratio
    std = ratio * FIR_STD
    fir['coef'] = firwin(fir['order'] + 1, freq, window=('gaussian', std))
    return fir
def _calc_kdp_ray(dp, dz, rng, thsd=12, nfilter=1, bad=-32768, fir=None):
    """
    Pure Python approach to filtering phase and estimating KDP. Currently
    disabled due to performance issues.
    Arguments
    ---------
    dp = 1D ray of differential phase
    dz = 1D ray of reflectivity
    rng = 1D ray of range
    thsd = Scalar or 1D ray of diff phase standard deviation thresholds
    nfilter = Number of times to filter the data
    bad = Bad/missing data value
    fir = Dictionary containing FIR filter parameters
    Returns
    -------
    kd_lin = Specific differential phase (deg/km, 1D array)
    dp_lin = Filtered differential phase (deg, 1D array)
    sd_lin = Standard deviation of diff. phase (deg, 1D array)
    """
    # Define needed variables
    kd_lin = np.zeros_like(rng) + bad
    sd_lin = np.zeros_like(rng) + 100.0
    # User can provide a spatially varying stddev(dp) threshold
    if not hasattr(thsd, '__len__'):
        thsd = np.zeros_like(rng) + thsd
    length = len(rng)
    lin = np.arange(length)
    # Half window size for calculating stdev of phase (fixed @ 11 gates)
    half_std_win = 5
    half_fir_win = fir['order'] // 2  # Half window size for FIR filtering
    y = np.zeros(length) + bad  # Dummy variable to store filtered phase
    z = 1.0 * dp  # Dummy variable to store un/pre-processed phase
    # print(time.time() - begin_time, 'seconds since start (DEF)')
    #####################################################################
    # Calculate standard deviation of phidp
    mask = dp != bad
    for i in lin[mask]:
        index1 = np.int32(i - half_std_win)
        index2 = np.int32(i + half_std_win)
        if index1 >= 0 and index2 < length - 1:
            yy = dp[index1:index2]
            tmp_mask = mask[index1:index2]
            # Need more than half a window of good gates to trust the stdev.
            if len(yy[tmp_mask]) > half_std_win:
                sd_lin[i] = _quick_std(yy, tmp_mask)
    # ------------- MAIN LOOP of Phidp Adaptive Filtering ------------------
    # FIR FILTER SECTION
    for mloop in np.arange(nfilter):
        # Only gates with low phase noise and valid phase are filtered.
        mask = np.logical_and(sd_lin <= thsd, z != bad)
        for i in lin[mask]:
            index1 = np.int32(i - half_fir_win)
            index2 = np.int32(i + half_fir_win)
            if index1 >= 0 and index2 < length - 1:
                yy = z[index1:index2+1]
                xx = rng[index1:index2+1]
                tmp_mask = mask[index1:index2+1]
                siz = len(yy[tmp_mask])
                if siz > 0.8 * fir['order']:
                    if siz < fir['order'] + 1:
                        # Fill masked-out gates from a line fit so the FIR
                        # window is complete before convolving.
                        result = _leastsqrs(xx, yy, siz, tmp_mask)
                        yy[~tmp_mask] = result[0] * xx[~tmp_mask] + result[1]
                    y[i] = fir['gain'] * np.dot(fir['coef'], yy)
        z = 1.0 * y  # Enables re-filtering of processed phase
    dp_lin = 1.0 * z
    # print(time.time() - begin_time, 'seconds since start (FDP)')
    # *****************END LOOP for Phidp Adaptive Filtering******************
    # CALCULATE KDP
    # Default value for nadp is half_fir_win, but varies based on Zh
    nadp = np.int16(0 * dz + half_fir_win)
    tmp_mask = dz < 35
    nadp[tmp_mask] = 3 * half_fir_win
    tmp_mask = np.logical_and(dz >= 35, dz < 45)
    nadp[tmp_mask] = 2 * half_fir_win
    mask = dp_lin != bad
    for i in lin[mask]:
        index1, index2 = _get_nadp_indices(nadp, i)
        if index1 >= 0 and index2 <= length:
            tmp_mask = mask[index1:index2]
            xx = rng[index1:index2]
            siz = len(xx[tmp_mask])
            # Improved Kdp based on LSE fit to Adap filt Phidp
            if siz >= 0.8 * nadp[i]:
                yy = dp_lin[index1:index2]
                kd_lin[i] = _fit_line_and_get_kdp(xx, yy, siz, tmp_mask)
    # *******************END KDP CALCULATION****************************
    # print(time.time() - begin_time, 'seconds since start (KDP/Done)')
    return kd_lin, dp_lin, sd_lin
def _leastsqrs(xx, yy, siz, tmp_mask):
"""
Following is faster than np.polyfit
e.g., return np.polyfit(xx[tmp_mask], yy[tmp_mask], 1)
"""
A = np.array([xx[tmp_mask], np.ones(siz)])
return linalg.lstsq(A.T, yy[tmp_mask])[0]
def _get_nadp_indices(nadp, i):
half_nadp = nadp[i] / 2
return np.int32(i - half_nadp), np.int32(i + half_nadp + 1)
def _fit_line_and_get_kdp(xx, yy, siz, tmp_mask):
    """KDP is half the slope of the line fit to the filtered phase."""
    slope = _leastsqrs(xx, yy, siz, tmp_mask)[0]
    return 0.5 * slope
def _quick_std(array, mask):
"""Following is faster than np.std()"""
a = array[mask]
m = a.mean()
c = a - m
return (np.dot(c, c) / a.size)**0.5
|
tjlang/CSU_RadarTools
|
csu_radartools/csu_kdp.py
|
Python
|
gpl-2.0
| 10,729
|
[
"Gaussian"
] |
553dc3db0196c7c54c643ea552c5d5420eacd1d6e148433f32bd8ef8aae26f4a
|
#!/usr/bin/env python
'''!
@package rpgtoolbox.rpgtools
@file rpgtools.py
@brief RPG helpful functions
This module contains some helpful functions for role-playing games like:
- dice() for dice rolling
- getLvl() for calculating the actual level of a char
@date (C) 2015-2021
@license GNU V3.0
@author Marcus Schwamberger
@email marcus@lederzeug.de
@version 1.0
'''
__updated__ = "28.12.2020"
import random
from rpgtoolbox.globaltools import readCSV
from rpgtoolbox.rolemaster import rankbonus
import re
def dice(sides = 6, number = 1):
    '''!
    Roll a die one or more times and collect the results.

    @param sides number of sides of the used dice
    @param number number of used dices/rolls
    @retval result list containing integer numbers of the dice rolls
    '''
    return [random.randint(1, sides) for _ in range(number)]
def getLvl(ep = 10000):
    '''!
    This function calculates the level of a MERP/RM character from its
    experience points. The EP cost per level increases in brackets.

    @param ep experience points of the character
    @retval level of the character as integer
    '''
    if ep <= 50000:
        return round(int(ep / 10000))
    # (upper bound of bracket, bracket start, EP per level, level offset)
    brackets = ((150000, 50000, 20000, 5),
                (300000, 150000, 30000, 10),
                (500000, 300000, 40000, 15))
    for cap, base, step, offset in brackets:
        if ep <= cap:
            return round(round((ep - base) / step) + offset)
    return round(round((ep - 500000) / 50000) + 20)
def calcTotals(charval = {}):
    '''!
    This function calculates total bonusses for all categories and skills of a
    character.
    It saves rank bonusses and totals in the character's data dictionary
    @param charval the character's (whole) data in JSON format
    @retval result updated character's data concerning the total bonusses.

    NOTE(review): charval is mutated in place and also returned; the default
    argument is a shared mutable dict — callers should always pass their own.
    '''
    for cat in charval['cat']:
        # Category-level rank bonus from the category's progression table.
        progression = charval['cat'][cat]['Progression']
        rank = charval['cat'][cat]['rank']
        catrankbonus = rankbonus(rank = rank, progression = progression)
        charval['cat'][cat]['rank bonus'] = catrankbonus
        statbonus = 0
        itembonus = charval['cat'][cat]['item bonus']
        if 'prof bonus' in charval['cat'][cat]:
            profbonus = charval['cat'][cat]['prof bonus']
        else:
            profbonus = 0
        specbonus = charval['cat'][cat]['spec bonus']
        # Sum the totals of the governing stats; 'Stats' may be empty,
        # a single stat name, or a list of names.
        if charval['cat'][cat]['Stats'] == [""] or charval['cat'][cat]['Stats'] == '':
            pass
        elif type(charval['cat'][cat]['Stats']) != type([]):
            statbonus += charval[charval['cat'][cat]['Stats']]['total']
        else:
            for s in charval['cat'][cat]['Stats']:
                # "SD" is already the exact key; other names are normalized.
                if s != "SD":
                    statbonus += charval[s.strip(" ").capitalize()]['total']
                else:
                    statbonus += charval[s]['total']
        charval['cat'][cat]['stat bonus'] = statbonus
        charval['cat'][cat]['total bonus'] = rankbonus(rank = rank,
                                                      profession = profbonus,
                                                      special = specbonus,
                                                      progression = progression
                                                      ) + statbonus + itembonus
        for skill in charval['cat'][cat]['Skill']:
            #DEBUG
            print("calc total: {} - {}".format(cat, skill))
            # Skip the bookkeeping entries ('Progression', and for spell
            # categories also 'Stats') — only real skills get totals.
            if (skill != "Progression" and "Spell" not in cat) or ("Spell" in cat and skill not in ['Stats', 'Progression']):
                progression = charval['cat'][cat]['Skill'][skill]['Progression']
                if type(progression) == type(2):
                    progression = [progression]
                rank = charval['cat'][cat]['Skill'][skill]['rank']
                bonus = rankbonus(rank = rank, progression = progression)
                charval['cat'][cat]['Skill'][skill]['rank bonus'] = bonus
                total = bonus + charval['cat'][cat]['total bonus'] + charval['cat'][cat]['Skill'][skill]['item bonus'] + charval['cat'][cat]['Skill'][skill]['spec bonus']
                charval['cat'][cat]['Skill'][skill]['total bonus'] = total
                # Body Development gets a flat +10 on top of everything.
                if skill == "Body Development":
                    charval['cat'][cat]['Skill'][skill]['total bonus'] += 10
    return charval
def RRroll(attacklvl, targetlvl, roll):
    '''!
    This function checks whether a resistance roll (RR) has succeeded.

    @param attacklvl level of the attack or attacker
    @param targetlvl level of the target
    @param roll final result of a resistance roll (all modifiers and bonusses ect)
    @retval resisted RR was successful True/False
    @retval value the value of the RR roll table
    '''
    # Base target number grows with the attacker's level (RR table columns).
    if attacklvl < 6:
        value = 50 + (attacklvl - 1) * 5
    elif attacklvl < 11:
        value = 70 + (attacklvl - 5) * 3
    elif attacklvl < 15:
        value = 85 + (attacklvl - 10) * 2
    else:
        value = 90 + (attacklvl - 15)
    # ...and shrinks with the target's level (RR table rows).
    if targetlvl < 6:
        value -= (targetlvl - 1) * 5
    elif targetlvl < 11:
        value -= (20 + (targetlvl - 5) * 3)
    elif targetlvl < 16:
        value -= (32 + (targetlvl - 10) * 2)
    else:
        value -= (39 + (targetlvl - 15))
    return roll >= value, value
def statGain(dicelow = 1, dicehigh = 1, temp = 50, pot = 75):
    '''!
    This function calculates the stat gain roll: a temporary stat may rise or fall.

    @param dicelow result of the lower result of the d10 roll
    @param dicehigh result of the higher result of the d10 roll
    @param temp temp value of stat
    @param pot pot value of stat
    @retval result new temp value of stat
    '''
    statdif = pot - temp
    # Guarantee dicelow really holds the smaller of the two rolls.
    if dicelow >= dicehigh:
        dicelow, dicehigh = int(dicehigh), int(dicelow)
    # The closer temp is to pot, the harder it is to gain.
    if statdif <= 10:
        delta = -dicelow if dicelow < 6 else 2 * dicelow
    elif statdif <= 20:
        delta = -dicehigh if dicehigh < 6 else 2 * dicehigh
    else:
        both = dicelow + dicehigh
        delta = -both if both < 6 else both
    # A stat can never exceed its potential.
    return min(temp + delta, pot)
class equipmentPenalities(object):
    """
    This class calculates MMP and armor penalties.
    ----
    @bug sometimes armor penalties are not correctly calculated
    """

    def __init__(self, character = {}):
        """
        Constructor equipmentPenalties
        @param character JSON structure with all character data

        NOTE(review): the default argument is a shared mutable dict, and a
        character without an 'inventory' key terminates the interpreter via
        exit() — both look worth revisiting.
        """
        self.character = character
        if "inventory" not in character.keys():
            exit()
        # weightpenalty: cumulative maneuver penalty from carried weight.
        self.weightpenalty = 0
        # AT: armor type, defaults to 1 (unarmored).
        self.AT = 1
        self.calcWeightPen()
        self.getAT()
        self.calcArmorPen()

    def calcWeightPen(self):
        """
        This calculates the weight penalty

        Parses the character's body weight string (number plus optional
        'lbs'/'kg' unit) and subtracts 8 for every weight-allowance multiple
        exceeded by the carried load.
        """
        rex = r"^([0-9]{1,6}[\.]{0,1}[0-9]{0,2})( +|)(lbs.|lbs|kg|)$"
        checker = re.compile(rex)
        chk = checker.match(self.character["background"]["weight"])
        if chk:
            # BWA = basic weight allowance (10% of body weight in lbs);
            # kg (or unit-less) values are converted with factor 2.2.
            if chk.group(3) in ["", "kg"]:
                BWA = round(float(chk.group(1)) * 2.2 / 10.0, 2)
            else:
                BWA = round(float(chk.group(1)) / 10.0, 2)
            limitfactor = 1
            while self.character["carried"] > BWA * limitfactor:
                self.weightpenalty -= 8
                limitfactor += 1

    def getAT(self):
        """
        This gets the armor type of equipped armor.

        Takes the highest AT among equipped armor pieces; a pair of greaves
        (two pieces at 0.5 each, capped at 1) raises the AT by one.
        """
        greaves = 0
        for armor in self.character["inventory"]["armor"]:
            if armor["location"] == "equipped":
                if type(armor["AT"]) == type(2):
                    if armor["AT"] > self.AT:
                        self.AT = armor["AT"]
                if "greaves" in armor["name"].lower():
                    greaves += 0.5
        if greaves > 1:
            greaves = 1
        self.AT += int(greaves)

    def calcArmorPen(self):
        """
        This calculates all armor penalties.

        Penalty values per AT; the max maneuver modification is softened by
        the relevant armor skill's total bonus, but never above the minimum.
        NOTE(review): the values look like the Rolemaster armor table —
        confirm against the rulebook before changing anything.
        """
        ## @var self.minmanmod
        # minimum maneuver modification
        self.minmanmod = 0
        ## @var self.maxmanmod
        # maximum maneuver modification
        self.maxmanmod = 0
        ## @var self.msatpen
        # missile attack penalty
        self.misatpen = 0
        ## @var self.armqupen
        # armor quickness penalty
        self.armqupen = 0
        # AT 6-8: soft leather
        if self.AT == 6:
            self.maxmanmod = -20 + self.character["cat"]["Armor - Light"]["Skill"]["Soft Leather"]["total bonus"]
            self.misatpen = -5
        elif self.AT == 7:
            self.minmanmod = -10
            self.maxmanmod = -40 + self.character["cat"]["Armor - Light"]["Skill"]["Soft Leather"]["total bonus"]
            self.misatpen = -15
            self.armqupen = -10
        elif self.AT == 8:
            self.minmanmod = -15
            self.maxmanmod = -50 + self.character["cat"]["Armor - Light"]["Skill"]["Soft Leather"]["total bonus"]
            self.misatpen = -15
            self.armqupen = -15
        # AT 9-12: rigid leather
        elif self.AT == 9:
            self.minmanmod = -5
            self.maxmanmod = -50 + self.character["cat"]["Armor - Light"]["Skill"]["Rigid Leather"]["total bonus"]
        elif self.AT == 10:
            self.minmanmod = -10
            self.maxmanmod = -70 + self.character["cat"]["Armor - Light"]["Skill"]["Rigid Leather"]["total bonus"]
            self.misatpen = -10
            self.armqupen = -5
        elif self.AT == 11:
            self.minmanmod = -15
            self.maxmanmod = -90 + self.character["cat"]["Armor - Light"]["Skill"]["Rigid Leather"]["total bonus"]
            self.misatpen = -20
            self.armqupen = -15
        elif self.AT == 12:
            self.minmanmod = -15
            self.maxmanmod = -110 + self.character["cat"]["Armor - Light"]["Skill"]["Rigid Leather"]["total bonus"]
            self.misatpen = -30
            self.armqupen = -15
        # AT 13-16: chain
        elif self.AT == 13:
            self.minmanmod = -10
            self.maxmanmod = -70 + self.character["cat"]["Armor - Medium"]["Skill"]["Chain"]["total bonus"]
            self.armqupen = -5
        elif self.AT == 14:
            self.minmanmod = -15
            self.maxmanmod = -90 + self.character["cat"]["Armor - Medium"]["Skill"]["Chain"]["total bonus"]
            self.misatpen = -10
            self.armqupen = -10
        elif self.AT == 15:
            self.minmanmod = -25
            self.maxmanmod = -120 + self.character["cat"]["Armor - Medium"]["Skill"]["Chain"]["total bonus"]
            self.misatpen = -20
            self.armqupen = -20
        elif self.AT == 16:
            self.minmanmod = -25
            self.maxmanmod = -130 + self.character["cat"]["Armor - Medium"]["Skill"]["Chain"]["total bonus"]
            self.misatpen = -20
            self.armqupen = -20
        # AT 17-20: plate
        elif self.AT == 17:
            self.minmanmod = -15
            self.maxmanmod = -90 + self.character["cat"]["Armor - Heavy"]["Skill"]["Plate"]["total bonus"]
            self.armqupen = -10
        elif self.AT == 18:
            self.minmanmod = -20
            self.maxmanmod = -110 + self.character["cat"]["Armor - Heavy"]["Skill"]["Plate"]["total bonus"]
            self.misatpen = -10
            self.armqupen = -20
        elif self.AT == 19:
            self.minmanmod = -35
            self.maxmanmod = -150 + self.character["cat"]["Armor - Heavy"]["Skill"]["Plate"]["total bonus"]
            self.misatpen = -30
            self.armqupen = -30
        elif self.AT == 20:
            self.minmanmod = -45
            self.maxmanmod = -165 + self.character["cat"]["Armor - Heavy"]["Skill"]["Plate"]["total bonus"]
            self.misatpen = -40
            self.armqupen = -40
        # The skill bonus may not push maxmanmod above minmanmod.
        if self.maxmanmod > self.minmanmod:
            self.maxmanmod = self.minmanmod
class statManeuver(object):
    '''
    This class handles static maneuver roll results. An object of it operates as single static maneuver table where a
    given roll (already modified by severity and other modifiers) is checked and the result returned.
    '''

    def __init__(self, tablefile = "./data/default/tables/general_smt.csv"):
        '''!
        Constructor statManeuver
        @param tablefile CSV containing the table which shall be used for static maneuver rolls.
        '''
        # Each row is expected to hold a 'roll' column of the form
        # "<lower> < <upper>" (either side may be empty, or lower may be "UM").
        self.table = readCSV(tablefile)

    def checkRoll(self, roll):
        '''!
        Checks the rolled number + bonusses for success.
        @param roll the modified roll result (number)
        @retval result table row as dictionary.
        '''
        result = {}
        for row in range(0, len(self.table)):
            lower, upper = self.table[row]['roll'].split(" < ")
            # "UM" (unmodified) rows match only an exact roll and win outright.
            if lower == "UM" and roll == int(upper):
                result = dict(self.table[row])
                break
            # Open-ended lower bound: matches any roll up to <upper>.
            elif lower == "" and roll <= int(upper):
                result = dict(self.table[row])
            # Open-ended upper bound: matches any roll from <lower> upward.
            elif upper == "" and int(lower) <= roll:
                result = dict(self.table[row])
            # Closed interval: later matching rows overwrite earlier ones.
            elif lower != "UM" and lower != "" and upper != "":
                if int(lower) <= roll <= int(upper):
                    result = dict(self.table[row])
        # Drop the range column before handing the row to the caller.
        if "roll" in list(result.keys()):
            del(result['roll'])
        return result
|
olmongol/rpg-tools
|
src/rpgtoolbox/rpgtools.py
|
Python
|
gpl-3.0
| 13,644
|
[
"BWA"
] |
924774abfb1899b405575ef197a8dc46d7edfa99668c0ec8056c61e5c8a79a3f
|
#! /usr/bin/env python3
# Script to clean out a file system such as scratch based on certain criteria
# (e.g. not accessed or modified in x days)
#
# archiving option
# /scratch/delete30/lastname_f/projectx/.archive-me is archived to
# /economy/lastname_f/archive/delete30/projectx-2014-08-21/
#
# fs-cleaner dirkpetersen / Sept 2014 - Oct 2017
#
import sys, os, pwd, argparse, subprocess, re, time, datetime, tempfile
try:
from scandir import walk
except:
#print('importing os.walk instead of scandir.walk')
from os import walk
class KeyboardInterruptError(Exception):
    """Custom exception type standing in for KeyboardInterrupt."""
def main():
    """Scan args.folder for stale files, then warn, delete, or archive them.

    Three passes are made over the collected results:
    1. warn users about files that will expire within args.warndays,
    2. delete (or, with --touchnotdel, touch) files past args.days and
       notify the owners,
    3. rsync files beneath a '.archive-me' marker directory to args.aroot.
    Relies on the module-global `args` (parsed CLI options) and helpers
    defined elsewhere in this file (logger, mywalk, getstat, send_mail, ...).
    """
    log = logger('fs-cleaner', args.debug)
    log.info('starting to check folder %s for files older than %s days...' % (args.folder, args.days))
    log.debug('Parsed arguments: %s' % args)
    # NOTE(review): currdir is never used below — candidate for removal.
    currdir = os.getcwd()
    #curruser = os.getenv('USER') does not work in many cron jobs
    curruser = pwd.getpwuid(os.getuid()).pw_name
    tmpdir = tempfile.gettempdir()
    # Cutoff timestamps: older-than-days, and the one-day warning window.
    days_back_as_secs = time.time() - (args.days * 24 * 3600)
    days_back_warn_secs = days_back_as_secs + args.warndays * 24 * 3600
    days_back_warn_secs_minus1 = days_back_as_secs + (args.warndays - 1) * 24 * 3600 # warndays - 1
    days_back_datestr = str(datetime.date.today() + datetime.timedelta(args.days * -1)) # e.g. '2014-07-01'
    #print('days_back_as_secs: %s' % time.ctime(days_back_as_secs))
    #print('days_back_warn_secs: %s' % time.ctime(days_back_warn_secs))
    filedict = {} # list of files to delete (grouped by key uid)
    warndict = {} # list of files to warn about (grouped by key uid)
    archdict = {} # list of files to archive (grouped by key uid)
    infodict = {} # contains list per uid: numfiles, sizefiles, numwarnfiles, sizewarnfiles
    arch_roots = [] # direcories that contain a flag file '.archive-me'
    #print ('\nScanning folder %s for files older than %s days...' % (args.folder, args.days))
    if args.folder == '/':
        print('root folder not allowed !')
        return False
    for root, folders, files in mywalk(args.folder):
        #print(root)
        #for folder in folders:
        #print ('...folder:%s' % folder)
        # check if the user wanted to archive
        if args.delete_folders:
            # Remove empty subdirectories whose mtime is past the cutoff.
            if not folders and not files and root != os.path.normpath(args.folder):
                stat=getstat(root)
                if stat.st_mtime <= days_back_as_secs:
                    if not args.debug:
                        os.rmdir(root)
                    #print('would delete %s' % root)
                    continue
        if os.path.exists(os.path.join(root, '.archive-me')):
            if not root in arch_roots:
                arch_roots.append(os.path.join(root, '')) # make sure trailing slash is added
        for f in files:
            p=os.path.join(root,f)
            stat=getstat(p)
            if not stat:
                continue
            # Repair bogus (non-positive) timestamps instead of acting on them.
            if stat.st_atime <= 0:
                setfiletime(p, "atime")
                if args.debug:
                    sys.stderr.write('atime reset to current time:\n%s' % p)
                continue
            if stat.st_mtime <= 0:
                setfiletime(p, "mtime")
                if args.debug:
                    # NOTE(review): message says 'atime' on the mtime path — confirm intent.
                    sys.stderr.write('atime reset to current time:\n%s' % p)
                continue
            # A file counts as "recent" by the newest of atime/mtime/ctime.
            recent_time = stat.st_atime
            if stat.st_mtime > recent_time:
                recent_time = stat.st_mtime
            if stat.st_ctime > recent_time:
                recent_time = stat.st_ctime
            if stat.st_uid not in infodict:
                infodict[stat.st_uid] = [0, 0, 0, 0]
            if recent_time <= days_back_as_secs:
                # file reaches threshold defined by args.days
                #print('owner:%s file:%s atime:%s timeback:%s' % (stat.st_uid, p, recent_time, days_back_as_secs))
                if args.del_adoubles and f.startswith('._'):
                    if args.debug:
                        print("DEBUG: would delete AppleDouble file '%s' !" % p)
                    else:
                        os.remove(p)
                        if os.path.exists(p):
                            sys.stderr.write('file not removed:%s\n' % p)
                    continue
                startpath = getstartpath(arch_roots,root)
                if startpath != '':
                    #file is archived, dict key is source folder minus root.
                    subpath = startpath[len(os.path.join(args.folder, '')):-1] #subpath without trailing slashes
                    if subpath not in archdict:
                        archdict[subpath] = list()
                    archdict[subpath].append(p)
                else:
                    #file is deleted
                    if stat.st_uid not in filedict:
                        filedict[stat.st_uid] = list()
                    if args.touchnotdel:
                        #touch a file with current time stamp
                        setfiletime(p)
                        args.suppress_emails = True
                        sys.stderr.write('atime reset:\n%s' % p)
                    else:
                        #really delete the file
                        if not args.debug:
                            os.remove(p)
                            if os.path.exists(p):
                                sys.stderr.write('file not removed:%s\n' % p)
                    filedict[stat.st_uid].append(p)
                    infodict[stat.st_uid][0]+=1
                    infodict[stat.st_uid][1]+=stat.st_size
            if args.warndays > 0 and not startswithpath(arch_roots,root):
                # no warn if .archive-me path in root
                if (recent_time <= days_back_warn_secs and recent_time >= days_back_warn_secs_minus1):
                    if stat.st_uid not in warndict:
                        warndict[stat.st_uid] = list()
                    warndict[stat.st_uid].append(p)
                    infodict[stat.st_uid][2]+=1
                    infodict[stat.st_uid][3]+=stat.st_size
    #print(len(warndict),len(filedict))
    if not os.path.exists(tmpdir+'/'+curruser+'/fs-cleaner'):
        os.makedirs(tmpdir+'/'+curruser+'/fs-cleaner')
    # ********************** process notifications for warnings ********************************************
    for k, v in warndict.items():
        user=uid2user(k)
        if not os.path.exists(tmpdir+'/'+curruser+'/fs-cleaner/'+user):
            os.mkdir(tmpdir+'/'+curruser+'/fs-cleaner/'+user)
        file2send=tmpdir+'/'+curruser+'/fs-cleaner/'+user+'/'+user+'-warn-delete.txt'
        if list2file(v,file2send):
            if not args.debug:
                try:
                    if not args.suppress_emails:
                        send_mail([user,], "WARNING: In %s days will delete files in %s!" % (args.warndays, args.folder),
                            "Please see attached list of files!\n\n" \
                            "The files listed in the attached text file\n" \
                            "will be deleted in %s days when they will\n" \
                            "not have been touched for %s days:\n" \
                            "\n# of files: %s, total space: %s GB\n" \
                            "You can prevent deletion of these files\n" \
                            "by using the command 'touch -a filename'\n" \
                            "on each file. This will reset the access \n" \
                            "time of the file to the current date.\n" \
                            "\n" % (args.warndays, args.days, infodict[k][2], "{0:.3f}".format(infodict[k][3]/1073741824)), # TB: 838860 , GB: 1073741824
                            [file2send,])
                        print ('\nSent file delete warning to user %s' % user)
                        log.info('Sent delete warning for %s files (%s GB) to %s with filelist %s' % (infodict[k][2], "{0:.3f}".format(infodict[k][3]/1073741824), user, file2send))
                except:
                    e=sys.exc_info()[0]
                    sys.stderr.write("Error in send_mail while sending to '%s': %s\n" % (user, e))
                    log.error("Error in send_mail while sending to '%s': %s" % (user, e))
                    if args.email:
                        send_mail([args.email,], "Error - fs-cleaner",
                            "Please debug email notification to user '%s', Error: %s\n" % (user, e))
                    else:
                        sys.stderr.write('no option --email-notify given, cannot send error status via email\n')
            else:
                fn=len(v)
                if fn>10:
                    fn=10
                print("\nDEBUG: ##### WARN ##########################################################")
                print("DEBUG: Will delete %s files (%s GB total) owned by '%s'" % (infodict[k][2], "{0:.3f}".format(infodict[k][3]/float(1073741824)), user))
                print("DEBUG: would send file '%s' to user '%s' !" % (file2send, user))
                print('DEBUG: List of files to delete (maximum 10 listed):')
                for i in range(fn):
                    print(v[i])
        else:
            print("Could not save file '%s'" % file2send)
    # ******************* process deletions with notification ********************************
    for k, v in filedict.items():
        user=uid2user(k)
        if not os.path.exists(tmpdir+'/'+curruser+'/fs-cleaner/'+user):
            os.mkdir(tmpdir+'/'+curruser+'/fs-cleaner/'+user)
        file2send=tmpdir+'/'+curruser+'/fs-cleaner/'+user+'/'+user+'-deleted-'+days_back_datestr+'.txt'
        if list2file(v,file2send):
            if not args.debug:
                try:
                    if not args.suppress_emails:
                        send_mail([user,], "NOTE: Deleted files in %s that were not accessed for %s days" % (args.folder, args.days),
                            "Please see attached list of files!\n\n" \
                            "The files listed in the attached text file\n" \
                            "were deleted because they were not accessed\n" \
                            "in the last %s days." \
                            "\n" % args.days, [file2send,])
                        print ('\nSent file delete notification to user %s' % user)
                        log.info('Sent delete note to %s with filelist %s' % (user, file2send))
                except:
                    e=sys.exc_info()[0]
                    sys.stderr.write("Error in send_mail while sending to '%s': %s\n" % (user, e))
                    log.error("Error in send_mail while sending to '%s': %s" % (user, e))
                    if args.email:
                        send_mail([args.email,], "Error - fs-cleaner",
                            "Please debug email notification to user '%s', Error: %s\n" % (user, e))
                    else:
                        sys.stderr.write('no option --email-notify given, cannot send error status via email\n')
            else:
                fn=len(v)
                if fn>10:
                    fn=10
                print("\nDEBUG: ##### DELETE ##########################################################")
                print("DEBUG: would have deleted %s files (%s GB total) owned by '%s'" % (infodict[k][0], "{0:.3f}".format(infodict[k][1]/float(1073741824)), user))
                print("DEBUG: would have sent file '%s' to user '%s' !" % (file2send, user))
                print('DEBUG: List of files that would have been deleted (maximum 10 listed):')
                for i in range(fn):
                    print(v[i])
        else:
            print("Could not save file '%s'" % file2send)
    # ******************* process archiving without notification ********************************
    for k, v in archdict.items():
        fldr = k
        if not os.path.exists(tmpdir+'/'+curruser+'/fs-cleaner/'+fldr):
            os.makedirs(tmpdir+'/'+curruser+'/fs-cleaner/'+fldr)
        file2send=tmpdir+'/'+curruser+'/fs-cleaner/'+fldr+'/'+'archived-'+days_back_datestr+'.txt'
        tenant=''
        if args.atenant: # the first level below the source root represents a tenant that should go before
            p=fldr.find('/')
            if p>=0:
                tenant=fldr[:p]
                fldr=fldr[p+1:]
            else:
                tenant=fldr
                fldr=''
        rsyncsrcroot = os.path.join(args.folder,tenant,fldr,'')
        if fldr == '':
            rsyncdestroot = os.path.join(args.aroot,tenant,args.aprefix,days_back_datestr)
        else:
            rsyncdestroot = os.path.join(args.aroot,tenant,args.aprefix,fldr+'-'+days_back_datestr)
        if args.debug:
            print('**************************************************************')
            print("DEBUG: rsyncsrcroot",rsyncsrcroot)
            print("DEBUG: rsyncdestroot",rsyncdestroot)
            print('**************************************************************')
        if pathlist2file(v,file2send,rsyncsrcroot):
            bwlimitstr = ''
            if args.bwlimit>0:
                bwlimitstr = '--bwlimit=%i ' % args.bwlimit
            rsync_cmd = '/usr/bin/rsync -av --inplace --remove-source-files --exclude=".archive-me" --exclude=".snapshot" %s--files-from="%s" "%s" "%s"' % (bwlimitstr,file2send,rsyncsrcroot,rsyncdestroot)
            if args.debug:
                print("DEBUG: would have archived files in '%s' to '%s' !" % (file2send, rsyncdestroot))
                print("DEBUG: would have run: '%s' !" % rsync_cmd)
            else:
                if os.path.exists(args.aroot):
                    if not os.path.exists(rsyncdestroot):
                        os.makedirs(rsyncdestroot)
                    print("executing: '%s' !" % rsync_cmd)
                    p = subprocess.Popen(rsync_cmd, shell=True).wait()
                    if p != 0:
                        print(' **** Warning: Rsync resturned error code %i' % p)
                    else:
                        print('Archiving of folder %s complete !' % fldr)
                else:
                    print('folder %s does not exist. Please execute this manually: %s' % (args.aroot,rsync_cmd))
    log.info('finished checking folder %s for files older than %s days!' % (args.folder, args.days))
def startswithpath(pathlist, pathstr):
    """Return True if *pathstr* (normalized with a trailing separator)
    starts with at least one of the prefixes in *pathlist*."""
    normalized = os.path.join(pathstr, '')
    return any(normalized.startswith(prefix) for prefix in pathlist)
def getstartpath(pathlist, pathstr):
    """Return the first entry of *pathlist* that is a prefix of *pathstr*
    (normalized with a trailing separator), or '' when nothing matches."""
    normalized = os.path.join(pathstr, '')
    for prefix in pathlist:
        if normalized.startswith(prefix):
            return prefix
    return ''
def getstat(path):
    """Return os.lstat() information for *path*, or None when the file
    cannot be stat'ed (the error is written to stderr only in --debug
    mode; reads the module-global ``args``)."""
    try:
        return os.lstat(path)
    except (IOError, OSError) as err:  # FileNotFoundError exists only since Python 3.3
        if args.debug:
            sys.stderr.write(str(err))
        return None
def setfiletime(path, attr="atime"):
    """Touch timestamps on *path*.

    attr == "atime" (default): set atime to now, keep the current mtime.
    attr == "mtime" or "all":  reset both atime and mtime to now.
    Returns True on success, False (error printed to stderr) on failure.
    """
    try:
        statinfo = getstat(path)
        if attr == "atime":
            # new atime = now, mtime preserved from the stat we just took
            os.utime(path, (time.time(), statinfo.st_mtime))
        if attr in ("mtime", "all"):
            os.utime(path)  # both timestamps reset to the current time
        return True
    except Exception as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return False
def uid2user(uidNumber):
    """Best-effort translation of a numeric uid into a username.

    Falls back to returning the uid itself as a string when the lookup
    fails (unknown uid, non-numeric input, ...); the error is reported
    on stderr.
    """
    import pwd
    try:
        entry = pwd.getpwuid(int(uidNumber))
    except Exception as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return str(uidNumber)
    return entry.pw_name
def list2file(mylist, path):
    """Write one list item per CRLF-terminated line to *path*.

    Returns True on success, False (with the error printed to stderr)
    on failure.
    """
    try:
        with open(path, 'w') as outfile:
            outfile.writelines("{}\r\n".format(entry) for entry in mylist)
        return True
    except Exception as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return False
def pathlist2file(mylist, path, root):
    """Write *mylist* to *path*, one CRLF-terminated line per item, with
    the leading *root* prefix stripped from every entry (rsync's
    --files-from expects paths relative to the transfer root).

    Returns True on success, False (error printed to stderr) on failure.
    """
    try:
        with open(path, 'w') as outfile:
            for entry in mylist:
                outfile.write("{}\r\n".format(entry[len(root):]))
        return True
    except Exception as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return False
def mywalk(top, skipdirs=['.snapshot',]):
""" returns subset of os.walk """
for root, dirs, files in walk(top,topdown=True,onerror=walkerr):
for skipdir in skipdirs:
if skipdir in dirs:
dirs.remove(skipdir) # don't visit this directory
yield root, dirs, files
def walkerr(oserr):
    """os.walk onerror callback: report the error on stderr, keep walking."""
    sys.stderr.write(str(oserr))
    sys.stderr.write('\n')
    return 0
def send_mail(to, subject, text, attachments=[], cc=[], bcc=[], smtphost="", fromaddr=""):
    """Send a notification email (optionally with attachments) via SMTP.

    to          -- list of recipient addresses; bare usernames get the
                   local host's domain appended
    subject     -- subject line
    text        -- body text, inserted into a fixed notification template
    attachments -- file paths to attach (base64-encoded octet-stream)
    cc, bcc     -- additional recipient lists
    smtphost    -- SMTP relay; when empty it is derived from the local
                   host's MX record via get_mx_from_email_or_fqdn()
    fromaddr    -- sender; when empty a no-reply address is synthesized
                   from the script name and the local domain
    Returns True after the message is handed to the SMTP server, False
    on bad 'to' input.

    NOTE(review): the mutable default arguments ([]) are shared between
    calls; safe only as long as callers never mutate them.
    """
    # Pick the Python-2 or Python-3 spelling of the email MIME modules.
    if sys.version_info[0] == 2:
        from email.MIMEMultipart import MIMEMultipart
        from email.MIMEBase import MIMEBase
        from email.MIMEText import MIMEText
        from email.Utils import COMMASPACE, formatdate
        from email import Encoders
    else:
        from email.mime.multipart import MIMEMultipart
        from email.mime.base import MIMEBase
        from email.mime.text import MIMEText
        from email.utils import COMMASPACE, formatdate
        from email import encoders as Encoders
    from string import Template
    import socket
    import smtplib
    # Basic input validation on the recipient list.
    if not isinstance(to,list):
        print("the 'to' parameter needs to be a list")
        return False
    if len(to)==0:
        print("no 'to' email addresses")
        return False
    myhost=socket.getfqdn()
    if smtphost == '':
        smtphost = get_mx_from_email_or_fqdn(myhost)
    if not smtphost:
        # NOTE(review): execution continues with an empty smtphost here;
        # smtplib.SMTP(smtphost) below would then fail — consider returning.
        sys.stderr.write('could not determine smtp mail host !\n')
    if fromaddr == '':
        fromaddr = os.path.basename(__file__) + '-no-reply@' + \
           '.'.join(myhost.split(".")[-2:]) #extract domain from host
    # Qualify bare usernames with the local host's domain.
    tc=0
    for t in to:
        if '@' not in t:
            # if no email domain given use domain from local host
            to[tc]=t + '@' + '.'.join(myhost.split(".")[-2:])
        tc+=1
    message = MIMEMultipart()
    message['From'] = fromaddr
    message['To'] = COMMASPACE.join(to)
    message['Date'] = formatdate(localtime=True)
    message['Subject'] = subject
    message['Cc'] = COMMASPACE.join(cc)
    # NOTE(review): putting Bcc in a header exposes blind-carbon recipients
    # to everyone who receives the message; most mailers omit this header.
    message['Bcc'] = COMMASPACE.join(bcc)
    # Fixed body template; $-placeholders are filled in below.
    body = Template('This is a notification message from $application, running on \n' + \
                    'host $host. Please review the following message:\n\n' + \
                    '$notify_text\n\nIf output is being captured, you may find additional\n' + \
                    'information in your logs.\n'
                    )
    host_name = socket.gethostname()
    full_body = body.substitute(host=host_name.upper(), notify_text=text, application=os.path.basename(__file__))
    message.attach(MIMEText(full_body))
    # Attach every file as a base64-encoded octet-stream part.
    for f in attachments:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(open(f, 'rb').read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
        message.attach(part)
    # The envelope recipient list is the union of to, cc and bcc.
    addresses = []
    for x in to:
        addresses.append(x)
    for x in cc:
        addresses.append(x)
    for x in bcc:
        addresses.append(x)
    smtp = smtplib.SMTP(smtphost)
    smtp.sendmail(fromaddr, addresses, message.as_string())
    smtp.close()
    return True
def get_mx_from_email_or_fqdn(addr):
    """Return the first mail-exchanger hostname advertised for the domain
    of *addr* (an email address or an fqdn), or '' when none is found.

    Shells out to /usr/bin/nslookup and scrapes its output.
    """
    # Matches nslookup's "... mail exchanger = <priority> <host>" lines.
    mx_line = re.compile(r'^.*\s+mail exchanger = (?P<priority>\d+) (?P<host>\S+)\s*$')
    if '@' in addr:
        domain = addr.rsplit('@', 2)[1]
    else:
        # fqdn: keep the last two labels as the domain
        domain = '.'.join(addr.rsplit('.')[-2:])
    output = os.popen('/usr/bin/nslookup -q=mx %s' % domain, 'r')
    for line in output:
        match = mx_line.match(line)
        if match is not None:
            return match.group('host')[:-1]  # strip the trailing dot
    return ''
def logger(name=None, stderr=False):
    """Build a syslog-backed logger at INFO level; when *stderr* is true,
    also log to stderr and drop the level to DEBUG.

    Levels: CRITICAL:50, ERROR:40, WARNING:30, INFO:20, DEBUG:10, NOTSET:0.
    """
    import logging, logging.handlers
    if not name:
        name = __file__.split('/')[-1]  # default: this script's file name
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    fmt = logging.Formatter('%(name)s: %(levelname)s:%(module)s.%(lineno)d: %(message)s')
    # Always log to the local syslog socket.
    syslog_handler = logging.handlers.SysLogHandler('/dev/log')
    syslog_handler.formatter = fmt
    log.addHandler(syslog_handler)
    if stderr:
        log.setLevel(logging.DEBUG)
        # Mirror everything to stderr for interactive runs.
        console = logging.StreamHandler()
        console.formatter = fmt
        log.addHandler(console)
    return log
def parse_arguments(argv=None):
    """
    Gather command-line arguments.

    argv -- optional list of argument strings to parse instead of
            sys.argv[1:] (added backward-compatibly, for testability).
    Returns the parsed argparse.Namespace; exits with a usage error
    when the mandatory --folder option is missing.
    """
    parser = argparse.ArgumentParser(prog='fs-cleaner',
        description='clean out old files on a scratch file system ' + \
            'and notify file owners. Optionally archive files to destination ' + \
            'archive-root/+archive-prefix1level/+archive-prefix2/project-yyyy-mm-dd')
    # Fixed help text: the previous one ("git, chmod, cd") was copy-pasted
    # from another tool; this script only ever shells out to rsync.
    parser.add_argument( '--debug', '-g', dest='debug', action='store_true',
        help='show debug information and the rsync commands that would be executed',
        default=False )
    parser.add_argument( '--suppress-emails', '-s', dest='suppress_emails', action='store_true',
        help='do not send any emails to end users',
        default=False )
    parser.add_argument( '--delete-folders', '-x', dest='delete_folders', action='store_true',
        help='remove empty folders',
        default=False )
    parser.add_argument( '--email-notify', '-e', dest='email',
        action='store',
        help='notify this email address of any error ',
        default='' )
    parser.add_argument( '--warn-days', '-w', dest='warndays',
        action='store',
        type=int,
        help='warn user x days before removal of file (default: 0 days = deactivated) ',
        default=0 )
    parser.add_argument( '--days', '-d', dest='days',
        action='store',
        type=int,
        help='remove files older than x days (default: 1461 days or 4 years) ',
        default=1461 )
    parser.add_argument( '--archive-root', '-r', dest='aroot',
        action='store',
        help='the root folder of the destination archive file system',
        default='')
    parser.add_argument( '--archive-prefix', '-p', dest='aprefix',
        action='store',
        help=' fixed string to be added to prefix the target archive project folder with this sub directory',
        default='')
    parser.add_argument( '--archive-tenant', '-t', dest='atenant', action='store_true',
        help='If true treat the first folder level below --folder as group or tenant. In that case the archive ' \
            'target root directory will be aroot+tenant+aprefix ',
        default=False )
    parser.add_argument( '--bwlimit', '-b', dest='bwlimit',
        action='store',
        type=int,
        help='maximum bandwidth limit (KB/s) of all parallel rsync sessions combined',
        default=0)
    parser.add_argument( '--touch-instead-delete', '-i', dest='touchnotdel', action='store_true',
        help='Do not delete a file but touch it so atime will be reset to the current time',
        default=False )
    parser.add_argument( '--remove-appledoubles', '-a', dest='del_adoubles', action='store_true',
        help='immediately remove AppleDoubles at the source.',
        default=False )
    parser.add_argument( '--folder', '-f', dest='folder',
        action='store',
        help='search this folder and below for files to remove')
    args = parser.parse_args(argv)
    if not args.folder:
        # --folder is effectively mandatory; exit with a usage message.
        parser.error('required option --folder not given !')
    if args.debug:
        print('DEBUG: Arguments/Options: %s' % args)
    return args
if __name__ == '__main__':
    # Parse command-line arguments
    args = parse_arguments()  # module-global: helpers above read args.debug etc.
    sys.exit(main())  # main() drives the scan/notify/archive run; its status is the exit code
|
FredHutch/fs-cleaner
|
fs-cleaner.py
|
Python
|
apache-2.0
| 24,639
|
[
"VisIt"
] |
ab361765e6fc9f7d4f19d9d884313d98837e31a7eaf116a9abd14026f12638db
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import sys
sys.path.append('/home/will/PatientPicker/')
import pandas as pd
# <codecell>
import LoadingTools
# Load the cached REDCap export of the whole cohort from the shared HDF5 store.
store = pd.HDFStore('/home/will/HIVReportGen/Data/SplitRedcap/2013-01-16/EntireCohort.hdf')
pat_data = store['redcap']
# <codecell>
# Column groups pulled from the REDCap export for the demographics table.
drug_cols = ['Current Tobacco use',
             'Current Alcohol use',
             'Cocaine + metabolite',
             'Amphetamines',
             'Barbiturates',
             'Benzodiazepines',
             'Cannabinoid',
             'Opiates',
             'Phencyclidine',
             "Drugs used (choice='Ritalin')"]
gender_cols = ['Gender']
age_cols = ['Age', 'Calc-Years-Seropositive']
haart_cols = ['Current ART status']
race_cols = [col for col in pat_data.columns if col.startswith('Race')]
eth_cols = ['Ethnicity']
wanted_cols = drug_cols+gender_cols+age_cols+haart_cols+race_cols+eth_cols+['Patient ID', 'Patient visit number']
# NOTE(review): DataFrame.sort and .ix below are long-deprecated pandas APIs
# (sort_values / .loc in modern pandas) — this notebook targets old pandas.
spat_data = pat_data[wanted_cols].sort(['Patient ID', 'Patient visit number'])
# <codecell>
# Keep only the intake visit (R00), one row per patient.
intake_mask = spat_data['Patient visit number'] == 'R00'
intake_data = spat_data[intake_mask].set_index('Patient ID').drop(['Patient visit number'], axis = 1)
# Hand-curated patient ID lists defining the two comparison groups.
pc_pats = ['A0022','A0025','A0029','A0039',
           'A0040','A0047','A0056','A0058',
           'A0068','A0083','A0091','A0106',
           'A0124','A0136','A0142','A0151',
           'A0175','A0181','A0191','A0208',
           'A0209','A0262','A0284','A0313',
           'A0379','A0388','A0427']
pn_pats = ['A0010','A0017','A0032','A0078',
           'A0100','A0159','A0195','A0206',
           'A0217','A0220','A0223','A0238',
           'A0239','A0240','A0242','A0255',
           'A0258','A0280','A0294','A0321',
           'A0339','A0356','A0363','A0376',
           'A0380','A0393','A0397','A0405',
           'A0406','A0415','A0440','A0447',
           'A0456']
pc_data = intake_data.ix[pc_pats]
pn_data = intake_data.ix[pn_pats]
# <codecell>
from itertools import product
from copy import deepcopy
def calc_gender(indata):
    """Count Male/Female rows in the 'Gender' column of *indata*."""
    counts = {}
    for label in ('Male', 'Female'):
        counts[label] = (indata['Gender'] == label).sum()
    return counts
def calc_race(indata):
    """Tally race categories from the REDCap Race checkbox columns.

    'Other' counts rows where any of the less common race boxes is
    checked (a row may therefore contribute to several categories).
    """
    other_cols = ["Race (choice='Asian')",
                  "Race (choice='American Indian/Alaska Native')",
                  "Race (choice='Native Hawaiian or other Pacific Islander')",
                  "Race (choice='More than one race')"]
    return {
        'White': indata["Race (choice='White')"].sum(),
        'Black/AA': indata["Race (choice='Black or African American')"].sum(),
        'Unknown': indata["Race (choice='Unknown')"].sum(),
        'Other': indata[other_cols].any(axis=1).sum(),
    }
def calc_drug_use(indata):
    """Count positive substance-use indicators per substance.

    Tobacco/alcohol are 'Yes'/'No' survey answers compared against 'Yes';
    the remaining columns are boolean/0-1 toxicology flags summed directly.
    """
    indicators = {
        'Tobacco': indata['Current Tobacco use'] == 'Yes',
        'Alcohol': indata['Current Alcohol use'] == 'Yes',
        'Cocaine': indata['Cocaine + metabolite'],
        'Cannabinoids': indata['Cannabinoid'],
        'Methamphetamines': indata['Amphetamines'],
        'Benzodiazepines': indata['Benzodiazepines'],
        'Narcotics': indata['Opiates'],
        'Ritalin': indata["Drugs used (choice='Ritalin')"],
    }
    return dict((name, series.sum()) for name, series in indicators.items())
def calc_eth(indata):
    """Count ethnicity answers; missing values are reported as 'Unknown'."""
    eth = indata['Ethnicity']
    tallies = {label: (eth == label).sum()
               for label in ('Not Hispanic or Latino', 'Hispanic or Latino')}
    tallies['Unknown'] = eth.isnull().sum()
    return tallies
def calc_haart(indata):
    """Bucket HAART status: cH = on therapy, dH = discontinued
    (off or non-adherent), nH = treatment-naive."""
    status = indata['Current ART status']
    return {
        'cH': (status == 'on').sum(),
        'dH': status.isin(('off', 'non-adherent')).sum(),
        'nH': (status == 'naive').sum(),
    }
def calc_age(indata):
    """Return the mean 'Age', keyed by '' so the pivot shows a blank label."""
    mean_age = indata['Age'].mean()
    return {'': mean_age}
def calc_sero(indata):
    """Return the mean years seropositive, keyed by '' (blank pivot label)."""
    mean_years = indata['Calc-Years-Seropositive'].mean()
    return {'': mean_years}
# Pair each summary-table row label with the function that computes it.
# NOTE(review): the 'Seropsitive' typo is preserved — it becomes an output label.
anal_list = [('Gender', calc_gender),
             ('Race', calc_race),
             ('Ethnicity', calc_eth),
             ('Drug Use', calc_drug_use),
             ('HAART', calc_haart),
             ('Age', calc_age),
             ('Years Seropsitive', calc_sero)]
# Cohort slices: everyone at intake, plus the PN and PC patient subsets.
groups = [('All', intake_data),
          ('PN', pn_data),
          ('PC', pc_data)]
out_groups = []
# Evaluate every statistic on every cohort slice, collecting long-format rows.
for (gname, group), (varname, func) in product(groups, anal_list):
    odict = func(group)
    for key, val in odict.items():
        out_groups.append({
            'group':gname,
            'varname':varname,
            'itemname':key,
            'itemval':val
            })
# <codecell>
# Pivot the long-format rows into a (statistic, item) x cohort-group table.
ndf = pd.DataFrame(out_groups)
table_data = pd.pivot_table(ndf,
                            rows = ['varname', 'itemname'],  # old-pandas spelling of index=
                            cols = 'group',                  # old-pandas spelling of columns=
                            values = 'itemval')
print table_data  # Python 2 print statement
table_data.to_excel('/home/will/quickBrain/pat_groups.xlsx')
# <codecell>
# Same toxicology panel as above but without the Ritalin checkbox.
# NOTE(review): this rebinding of drug_cols is not used again in this script.
drug_cols = ['Current Tobacco use',
             'Current Alcohol use',
             'Cocaine + metabolite',
             'Amphetamines',
             'Barbiturates',
             'Benzodiazepines',
             'Cannabinoid',
             'Opiates',
             'Phencyclidine']
# Interactive inspection cells (values displayed by the notebook frontend).
intake_data['Ethnicity'].unique()
# <codecell>
pat_data['Patient ID'].unique()
# <codecell>
|
JudoWill/ResearchNotebooks
|
Untitled2.py
|
Python
|
mit
| 5,582
|
[
"VisIt"
] |
721af7b22edac582d6b85caacc7a3eed683c687b3a3521b37cebc2de96babd3e
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class State(object):
    """Swagger-generated model describing a US state in the Vericred API.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, id=None, name=None, code=None, fips_number=None,
                 last_date_for_individual=None, last_date_for_shop=None,
                 live_for_business=None, live_for_consumers=None):
        """Initialize a State model.

        swagger_types maps attribute name -> attribute type;
        attribute_map maps attribute name -> JSON key in the definition.
        """
        self.swagger_types = {
            'id': 'int',
            'name': 'str',
            'code': 'str',
            'fips_number': 'str',
            'last_date_for_individual': 'date',
            'last_date_for_shop': 'date',
            'live_for_business': 'bool',
            'live_for_consumers': 'bool'
        }
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'code': 'code',
            'fips_number': 'fips_number',
            'last_date_for_individual': 'last_date_for_individual',
            'last_date_for_shop': 'last_date_for_shop',
            'live_for_business': 'live_for_business',
            'live_for_consumers': 'live_for_consumers'
        }
        self._id = id
        self._name = name
        self._code = code
        self._fips_number = fips_number
        self._last_date_for_individual = last_date_for_individual
        self._last_date_for_shop = last_date_for_shop
        self._live_for_business = live_for_business
        self._live_for_consumers = live_for_consumers

    @property
    def id(self):
        """Primary key of this State (int)."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the primary key of this State."""
        self._id = id

    @property
    def name(self):
        """Name of the state (str)."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of the state."""
        self._name = name

    @property
    def code(self):
        """Two-letter code for the state (str)."""
        return self._code

    @code.setter
    def code(self, code):
        """Set the two-letter code for the state."""
        self._code = code

    @property
    def fips_number(self):
        """National FIPS number (str)."""
        return self._fips_number

    @fips_number.setter
    def fips_number(self, fips_number):
        """Set the national FIPS number."""
        self._fips_number = fips_number

    @property
    def last_date_for_individual(self):
        """Last date this state is live for individuals (date)."""
        return self._last_date_for_individual

    @last_date_for_individual.setter
    def last_date_for_individual(self, last_date_for_individual):
        """Set the last date this state is live for individuals."""
        self._last_date_for_individual = last_date_for_individual

    @property
    def last_date_for_shop(self):
        """Last date this state is live for shop (date)."""
        return self._last_date_for_shop

    @last_date_for_shop.setter
    def last_date_for_shop(self, last_date_for_shop):
        """Set the last date this state is live for shop."""
        self._last_date_for_shop = last_date_for_shop

    @property
    def live_for_business(self):
        """Whether this State is available for businesses (bool)."""
        return self._live_for_business

    @live_for_business.setter
    def live_for_business(self, live_for_business):
        """Set whether this State is available for businesses."""
        self._live_for_business = live_for_business

    @property
    def live_for_consumers(self):
        """Whether this State is available for individuals (bool)."""
        return self._live_for_consumers

    @live_for_consumers.setter
    def live_for_consumers(self, live_for_consumers):
        """Set whether this State is available for individuals."""
        self._live_for_consumers = live_for_consumers

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models, lists and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two States are equal when all of their attributes match."""
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
vericred/vericred-python
|
vericred_client/models/state.py
|
Python
|
apache-2.0
| 17,064
|
[
"VisIt"
] |
cf5b6d20332a229bee10f90d58f6a2c5bc6423e486260df3fd139744d2ad628d
|
import numpy, matplotlib
from CoolProp.HumidAirProp import HAProps
from CoolProp.Plots.Plots import InlineLabel
import numpy as np
import serial
from threading import Thread
import time
from datetime import tzinfo, timedelta, datetime
####################
# Setup for serial #
####################
last_received = ''  # most recent complete line read from the serial port
mabuffer = ''  # NOTE(review): appears unused in the rest of this script
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
ZERO = timedelta(0)  # zero offset; also reused as the (constant) DST value
class FixedOffset(tzinfo):
    """tzinfo with a fixed offset east of UTC and no daylight saving."""

    def __init__(self, offset, name):
        # offset: timedelta east of UTC; name: timezone label
        self.__offset = offset
        self.__name = name

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # This zone never observes daylight saving time.
        return ZERO
def serialLoop(ser,fil):
    """Read one line from serial port *ser*, answer time-sync requests,
    log the raw line to file *fil*, and parse it into
    (timestamp, status[5], relhum[5], temp[5]).

    Returns None when no complete line was available.

    NOTE(review): Python 2 code (print statements, long()).  The
    expected line format — unix time, then 5 x (status, rel.humidity,
    temperature) separated by ',\\t' — is assumed from the parsing below;
    confirm against the logger firmware.
    """
    global last_received, mabuffer
    try:
        a = ser.readline()
    except serial.SerialException as e:
        # On a serial failure: report it, then close and try to reopen the
        # port; back off for 10 s when the reopen fails as well.
        print e
        print ser
        if ser.isOpen():
            ser.close()
            print ser
        try:
            ser.open()
            print ser
        except serial.SerialException as e2:
            print e2
            time.sleep(10)
        # NOTE(review): when readline() raised, 'a' is unbound and the
        # len(a) test below raises UnboundLocalError — presumably caught
        # upstream; confirm intended behaviour.
    if len(a) > 0:
        last_received = a.strip()
        print 'in <',last_received
        if (last_received.find('\x07') > -1):
            # It requests sync: a BEL character means the logger wants the
            # current unix time, sent back as "T<seconds>".
            s="T%.2f"%time.time()
            ser.write(s)
            print 'out>',s
            last_received=last_received[1:]
        # Append the raw line to the CSV log file.
        print >>fil, last_received
        fil.flush()
        #t = datetime.strptime(last_received,'%Y-%m-%dT%H:%M:%S')
        #t = datetime.utcfromtimestamp(float(last_received))
        tokens = last_received.split(',\t')
        i = 0
        timestr=tokens[i]
        datime = long(timestr)
        i += 1
        status = []
        relhum = []
        temp = []
        try:
            # Five sensors, each reporting (status, rel. humidity, temperature).
            for j in range(5):
                status.append(int(tokens[i])); i += 1
                relhum.append(float(tokens[i])); i += 1
                temp.append(float(tokens[i])); i += 1
        except:
            # Malformed/short line: report what was parsed so far and return
            # the partial lists anyway.
            print tokens, i
            #raise
        return (datime, status, relhum, temp)
    else:
        # Nothing read within the timeout.
        print "."
        return
######################
# Setup for plotting #
######################
p = 101.325  # pressure: 101.325 is one standard atmosphere (presumably kPa, CoolProp's HAProps convention — confirm)
Tdb = numpy.linspace(-10,40,100)+273.15  # dry-bulb temperature axis, -10..40 C in kelvin
#Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
fig2=matplotlib.pyplot.figure(figsize=(10,8))
ax2=fig2.add_axes((0.1,0.1,0.85,0.85))
ax2.set_xlim((-1,10))
ax2.set_ylim((0,50))
# Saturation line
w = [HAProps('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
# Humidity lines
RHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for RH in RHValues:
    w = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
    ax.plot(Tdb-273.15,w,'b--',lw=1)
# Enthalpy lines
for H in [-20, -10, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90]:
    #Line goes from saturation to zero humidity ratio for this enthalpy
    T1 = HAProps('T','H',H,'P',p,'R',1.0)-273.15
    T0 = HAProps('T','H',H,'P',p,'R',0.0)-273.15
    w1 = HAProps('W','H',H,'P',p,'R',1.0)
    w0 = HAProps('W','H',H,'P',p,'R',0.0)
    ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r--',lw=1)
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.02)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
# Inline "phi = NN%" labels on each relative-humidity curve.
xv = Tdb #[K]
for RH in [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
    yv = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
    #y = HAProps('W','P',p,'H',65.000000,'R',RH)
    y = HAProps('W','P',p,'H',40,'R',RH)
    T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
    string = r'$\phi$='+str(RH*100)+'%'
    bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
    ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
import matplotlib.animation as animation
import time
# Animation state shared (via globals) with funco/funca below.
t = 0
x = 25
y = 0.005
# NOTE(review): [[]]*5 repeats the SAME inner list, but every slot is
# immediately replaced in the loop below, so no aliasing survives.
a,b=[[]]*5,[[]]*5
tt=[-1,0]
xx=[[]]*5
yy=[[]]*5
for j in range(5):
    a[j], = ax.plot(x, y,'o')  # one marker per sensor on the psychrometric chart
    xx[j] = [25.0,25.0]
    yy[j] = [0.005,0.005]
    b[j], = ax2.plot(tt, xx[j],'-')  # one temperature-vs-time trace per sensor
def funco(framenum,ser,fil):
    """FuncAnimation callback for the psychrometric chart: poll the serial
    logger and move the five state-point markers (py2 code)."""
    global t,a,b,x,y,tt,xx,yy
    print framenum,
    t = time.clock()
    try:
        (datime, status, relhum, temp) = serialLoop(ser,fil)
        #x = 25 + 10 * np.sin(t)
        #y = HAProps('W','T',x+273.15,'P',p,'R',r1)
        #z = HAProps('W','T',x+273.15,'P',p,'R',r2)
        tt.append(t)
        for j in range(5):
            x = temp[j]
            # humidity ratio at measured T and RH (relhum arrives in percent)
            y = HAProps('W','T',x+273.15,'P',p,'R',relhum[j]*0.01)
            xx[j].append(x)
            yy[j].append(y)
            a[j].set_data(x,y)
    except KeyboardInterrupt:
        print('exciting!')
    except TypeError:
        # serialLoop returned None (no complete line) -> tuple unpacking fails
        pass
def funca(framenum):
    """FuncAnimation callback for figure 2: redraw the temperature-vs-time
    traces collected by funco (py2 code)."""
    global a,b,x,y,tt,xx,yy
    t = time.clock()
    for j in range(5):
        # Problem with this is that animation.FuncAnimation
        # only acts on one figure, but we have here a second figure.
        b[j].set_data(tt,xx[j])
    #ax2.relim()
    #ax2.autoscale_view()
    ax2.set_xlim([0,t])  # grow the time axis as data accumulates
    print "hello still here"
if __name__ == '__main__':
    # Timestamped CSV log name in local time; FixedOffset is defined
    # earlier in this file (out of view here).
    localtz = FixedOffset(timedelta(seconds=-time.timezone),time.tzname[0])
    utctz = FixedOffset(timedelta(0),'UTC')
    starttime = datetime.now(localtz)
    filname = 'serial4_out_%s.csv'%starttime.strftime('%Y%m%dT%H%M%S')
    # Windows COM port -- adjust 'com4' for the attached sensor board.
    with serial.Serial(port='com4',
                       baudrate=9600,
                       timeout=0.1,
                       writeTimeout=0.1) \
            as ser, \
            open(filname,'w') \
            as fil:
        # Two animations: funco samples the serial port and updates the
        # psychrometric chart; funca redraws the time-series figure.
        anim = animation.FuncAnimation(fig,funco,
                                       fargs=(ser,fil),interval=400)
        anim2 = animation.FuncAnimation(fig2,funca,
                                        interval=100)
        matplotlib.pyplot.show()
|
nfette/psychroplotter
|
PsychScript2.py
|
Python
|
gpl-2.0
| 6,222
|
[
"exciting"
] |
af747e8f346c684f5c5956d4b768ad9d2e153982f67e096d46cfa116c7a37542
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-08-06 18:39:54
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-08-07 11:31:13
from __future__ import print_function, division, absolute_import
import warnings
class SciServerError(Exception):
    """Base class for all exceptions raised by the sciserver package."""
    pass
class SciServerAPIError(SciServerError):
    """Raised when an HTTP response from the SciServer API indicates failure.

    A detail string may be supplied; without one, a generic message is used.
    """

    def __init__(self, message=None):
        if message:
            text = 'Http response error from SciServer API. {0}'.format(message)
        else:
            # No detail supplied (None or empty) -- fall back to the generic text.
            text = 'Error with Http Response from SciServer API'
        super(SciServerAPIError, self).__init__(text)
class SciServerWarning(Warning):
    """Base class for all warnings issued by the sciserver package."""
    pass
# Inherits DeprecationWarning so standard warning filters apply, and
# SciServerWarning so package-wide filters catch it too.
class SciServerDeprecationWarning(DeprecationWarning, SciServerWarning):
    """A warning for deprecated features."""
    pass
|
havok2063/SciScript-Python
|
python/sciserver/exceptions.py
|
Python
|
apache-2.0
| 858
|
[
"Brian"
] |
90700c997bdbbdc1e8d3943d76e21f3e1ac825810bfee5db4818bd2b712341af
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Family Group Report"""
#------------------------------------------------------------------------
#
# Python Library
#
#------------------------------------------------------------------------
from functools import partial
#------------------------------------------------------------------------
#
# GRAMPS
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.lib import EventRoleType, EventType, NoteType, Person
from gramps.gen.plug.menu import BooleanOption, FamilyOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle,
FONT_SANS_SERIF, FONT_SERIF,
INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
#------------------------------------------------------------------------
#
# FamilyGroup
#
#------------------------------------------------------------------------
class FamilyGroup(Report):
    def __init__(self, database, options, user):
        """
        Create the FamilyGroup object that produces the report.

        The arguments are:

        database        - the GRAMPS database instance
        options         - instance of the Options class for this report
        user            - a gen.user.User() instance

        This report needs the following parameters (class variables)
        that come in the options class.

        family_handle - Handle of the family to write report on.
        includeAttrs  - Whether to include attributes
        name_format   - Preferred format to display names
        incl_private  - Whether to include private data
        """
        Report.__init__(self, database, options, user)
        menu = options.menu
        stdoptions.run_private_data_option(self, menu)
        self.family_handle = None
        # Resolve the user-selected family id to a database handle; stays
        # None when the id matches no family (write_report then emits only
        # a title page).
        family_id = menu.get_option_by_name('family_id').get_value()
        family = self.database.get_family_from_gramps_id(family_id)
        if family:
            self.family_handle = family.get_handle()
        else:
            self.family_handle = None
        # Cache every menu option value as a plain attribute.
        get_option_by_name = menu.get_option_by_name
        get_value = lambda name:get_option_by_name(name).get_value()
        self.recursive = get_value('recursive')
        self.missingInfo = get_value('missinginfo')
        self.generations = get_value('generations')
        self.incParEvents = get_value('incParEvents')
        self.incParAddr = get_value('incParAddr')
        self.incParNotes = get_value('incParNotes')
        self.incParNames = get_value('incParNames')
        self.incParMar = get_value('incParMar')
        self.incRelDates = get_value('incRelDates')
        self.incChiMar = get_value('incChiMar')
        self.includeAttrs = get_value('incattrs')
        rlocale = self.set_locale(get_value('trans'))
        self._ = rlocale.translation.sgettext # needed for English
        stdoptions.run_name_format_option(self, menu)
    def dump_parent_event(self, name,event):
        """Write table row(s) for one parent event (Birth/Death/Marriage/...).

        name  - translated label for the first column
        event - the Event object, or None (blank fields are still emitted
                so self.missingInfo can show fill-in rows)

        Layout: a row with the label and, if present, the description
        (spanning 2 columns); date and place then go on a row of their own.
        """
        place = ""
        date = ""
        descr = ""
        if event:
            date = self._get_date(event.get_date_object())
            place_handle = event.get_place_handle()
            place = ReportUtils.place_name(self.database,place_handle)
            descr = event.get_description()
            if self.includeAttrs:
                # Fold every event attribute into the description string.
                for attr in event.get_attribute_list():
                    if descr:
                        # translators: needed for Arabic, ignore otherwise
                        descr += self._("; ")
                    attr_type = self._get_type(attr.get_type())
                    # translators: needed for French, ignore otherwise
                    descr += self._("%(str1)s: %(str2)s") % {
                        'str1' : self._(attr_type),
                        'str2' : attr.get_value() }
        self.doc.start_row()
        self.doc.start_cell("FGR-TextContents")
        self.doc.start_paragraph('FGR-Normal')
        self.doc.write_text(name)
        self.doc.end_paragraph()
        self.doc.end_cell()
        if descr:
            # Description closes the first row (spans remaining 2 columns).
            self.doc.start_cell("FGR-TextContentsEnd",2)
            self.doc.start_paragraph('FGR-Normal')
            self.doc.write_text(descr)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
            if date or place:
                # Second row for date/place: first column left blank.
                self.doc.start_row()
                self.doc.start_cell("FGR-TextContents")
                self.doc.start_paragraph('FGR-Normal')
                self.doc.end_paragraph()
                self.doc.end_cell()
        if (date or place) or not descr:
            # Either finishing the date/place row started above, or -- with
            # no description -- completing the original row.
            self.doc.start_cell("FGR-TextContents")
            self.doc.start_paragraph('FGR-Normal')
            self.doc.write_text(date)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.start_cell("FGR-TextContentsEnd")
            self.doc.start_paragraph('FGR-Normal')
            self.doc.write_text(place)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
    def dump_parent_parents(self,person):
        """Write Father/Mother rows for the parents of *person*.

        When self.incRelDates is set, names gain a "(birth - death)" suffix.
        A missing parent gets an empty fill-in row only if self.missingInfo.
        NOTE(review): the father and mother branches below are near-exact
        duplicates -- a candidate for a shared helper.
        """
        family_handle = person.get_main_parents_family_handle()
        father_name = ""
        mother_name = ""
        if family_handle:
            family = self.database.get_family_from_handle(family_handle)
            father_handle = family.get_father_handle()
            if father_handle:
                father = self.database.get_person_from_handle(father_handle)
                father_name = self._name_display.display(father)
                if self.incRelDates:
                    birth_ref = father.get_birth_ref()
                    birth = " "
                    if birth_ref:
                        event = self.database.get_event_from_handle(birth_ref.ref)
                        birth = self._get_date(event.get_date_object())
                    death_ref = father.get_death_ref()
                    death = " "
                    if death_ref:
                        event = self.database.get_event_from_handle(death_ref.ref)
                        death = self._get_date(event.get_date_object())
                    if birth_ref or death_ref:
                        father_name = "%s (%s - %s)" % (father_name,birth,death)
            mother_handle = family.get_mother_handle()
            if mother_handle:
                mother = self.database.get_person_from_handle(mother_handle)
                mother_name = self._name_display.display(mother)
                if self.incRelDates:
                    birth_ref = mother.get_birth_ref()
                    birth = " "
                    if birth_ref:
                        event = self.database.get_event_from_handle(birth_ref.ref)
                        birth = self._get_date(event.get_date_object())
                    death_ref = mother.get_death_ref()
                    death = " "
                    if death_ref:
                        event = self.database.get_event_from_handle(death_ref.ref)
                        death = self._get_date(event.get_date_object())
                    if birth_ref or death_ref:
                        mother_name = "%s (%s - %s)" % (mother_name,birth,death)
        if father_name != "":
            self.doc.start_row()
            self.doc.start_cell("FGR-TextContents")
            self.doc.start_paragraph('FGR-Normal')
            self.doc.write_text(self._("Father"))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.start_cell("FGR-TextContentsEnd",2)
            self.doc.start_paragraph('FGR-Normal')
            # father is always bound here: father_name is only non-empty
            # when the father branch above ran.
            mark = ReportUtils.get_person_mark(self.database,father)
            self.doc.write_text(father_name,mark)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
        elif self.missingInfo:
            self.dump_parent_line(self._("Father"), "")
        if mother_name != "":
            self.doc.start_row()
            self.doc.start_cell("FGR-TextContents")
            self.doc.start_paragraph('FGR-Normal')
            self.doc.write_text(self._("Mother"))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.start_cell("FGR-TextContentsEnd",2)
            self.doc.start_paragraph('FGR-Normal')
            mark = ReportUtils.get_person_mark(self.database,mother)
            self.doc.write_text(mother_name,mark)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
        elif self.missingInfo:
            self.dump_parent_line(self._("Mother"), "")
def dump_parent_line(self, name, text):
self.doc.start_row()
self.doc.start_cell("FGR-TextContents")
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(name)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell("FGR-TextContentsEnd",2)
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(text)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
    def dump_parent_noteline(self, name, note):
        """Write a row holding a styled note: label *name*, then the note body.

        The note keeps its own styled text/format; HTML notes are flagged so
        the backend can pass the markup through.
        """
        self.doc.start_row()
        self.doc.start_cell("FGR-TextContents")
        self.doc.start_paragraph('FGR-Normal')
        self.doc.write_text(name)
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell("FGR-TextContentsEnd", 2)
        self.doc.write_styled_note(note.get_styledtext(),
                                   note.get_format(), 'FGR-Note',
                                   contains_html= (note.get_type() ==
                                                   NoteType.HTML_CODE)
                                  )
        self.doc.end_cell()
        self.doc.end_row()
    def dump_parent(self,title,person_handle):
        """Write the full table for one parent (Husband or Wife).

        title         - translated section heading ("Husband"/"Wife")
        person_handle - handle of the parent, or None; with missingInfo set
                        an empty Person is used so blank rows still appear.
        """
        if not person_handle and not self.missingInfo:
            return
        elif not person_handle:
            person = Person()
        else:
            person = self.database.get_person_from_handle(person_handle)
        name = self._name_display.display(person)
        self.doc.start_table(title,'FGR-ParentTable')
        self.doc.start_row()
        self.doc.start_cell('FGR-ParentHead',3)
        self.doc.start_paragraph('FGR-ParentName')
        self.doc.write_text(title + ': ')
        mark = ReportUtils.get_person_mark(self.database,person)
        self.doc.write_text(name,mark)
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()
        # Birth and death rows (blank rows allowed via missingInfo).
        birth_ref = person.get_birth_ref()
        birth = None
        evtName = self._("Birth")
        if birth_ref:
            birth = self.database.get_event_from_handle(birth_ref.ref)
        if birth or self.missingInfo:
            self.dump_parent_event(evtName,birth)
        death_ref = person.get_death_ref()
        death = None
        evtName = self._("Death")
        if death_ref:
            death = self.database.get_event_from_handle(death_ref.ref)
        if death or self.missingInfo:
            self.dump_parent_event(evtName,death)
        self.dump_parent_parents(person)
        if self.incParEvents:
            # All other primary events, skipping birth/death already shown.
            for event_ref in person.get_primary_event_ref_list():
                if event_ref != birth_ref and event_ref != death_ref:
                    event = self.database.get_event_from_handle(event_ref.ref)
                    event_type = self._get_type(event.get_type())
                    self.dump_parent_event(self._(event_type),event)
        if self.incParAddr:
            addrlist = person.get_address_list()[:]
            for addr in addrlist:
                location = ReportUtils.get_address_str(addr)
                date = self._get_date(addr.get_date_object())
                self.doc.start_row()
                self.doc.start_cell("FGR-TextContents")
                self.doc.start_paragraph('FGR-Normal')
                self.doc.write_text(self._("Address"))
                self.doc.end_paragraph()
                self.doc.end_cell()
                self.doc.start_cell("FGR-TextContents")
                self.doc.start_paragraph('FGR-Normal')
                self.doc.write_text(date)
                self.doc.end_paragraph()
                self.doc.end_cell()
                self.doc.start_cell("FGR-TextContentsEnd")
                self.doc.start_paragraph('FGR-Normal')
                self.doc.write_text(location)
                self.doc.end_paragraph()
                self.doc.end_cell()
                self.doc.end_row()
        if self.incParNotes:
            for notehandle in person.get_note_list():
                note = self.database.get_note_from_handle(notehandle)
                self.dump_parent_noteline(self._("Note"), note)
        if self.includeAttrs:
            for attr in person.get_attribute_list():
                attr_type = self._get_type(attr.get_type())
                self.dump_parent_line(self._(attr_type),attr.get_value())
        if self.incParNames:
            for alt_name in person.get_alternate_names():
                name_type = self._get_type(alt_name.get_type())
                name = self._name_display.display_name(alt_name)
                self.dump_parent_line(self._(name_type), name)
        self.doc.end_table()
    def dump_marriage(self,family):
        """Write the Marriage table for *family*.

        The marriage event itself (role FAMILY or PRIMARY) comes first,
        followed by all other family events, then family attributes.
        """
        if not family:
            return
        m = None
        family_list = family.get_event_ref_list()
        for event_ref in family_list:
            if event_ref:
                event = self.database.get_event_from_handle(event_ref.ref)
                if event.get_type() == EventType.MARRIAGE and \
                (event_ref.get_role() == EventRoleType.FAMILY or
                event_ref.get_role() == EventRoleType.PRIMARY):
                    m = event
                    break
        if len(family_list) > 0 or self.missingInfo or self.includeAttrs:
            self.doc.start_table("MarriageInfo",'FGR-ParentTable')
            self.doc.start_row()
            self.doc.start_cell('FGR-ParentHead',3)
            self.doc.start_paragraph('FGR-ParentName')
            self.doc.write_text(self._("Marriage:"))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
            self.dump_parent_event(self._("Marriage"),m)
            # Every non-marriage family event gets its own row.
            for event_ref in family_list:
                if event_ref:
                    event = self.database.get_event_from_handle(event_ref.ref)
                    if event.get_type() != EventType.MARRIAGE:
                        event_type = self._get_type(event.get_type())
                        self.dump_parent_event(self._(event_type),event)
            if self.includeAttrs:
                for attr in family.get_attribute_list():
                    attr_type = self._get_type(attr.get_type())
                    self.dump_parent_line(self._(attr_type),attr.get_value())
            self.doc.end_table()
    def dump_child_event(self,text, name,event):
        """Write one event row inside the children table.

        text  - cell style for the (empty) leading column, 'FGR-TextChild1'
                or 'FGR-TextChild2' (controls the bottom border)
        name  - translated event label (Birth/Death/Marriage)
        event - the Event, or None for a blank fill-in row
        """
        date = ""
        place = ""
        if event:
            date = self._get_date(event.get_date_object())
            place_handle = event.get_place_handle()
            if place_handle:
                place = self.database.get_place_from_handle(place_handle).get_title()
        self.doc.start_row()
        self.doc.start_cell(text)
        self.doc.start_paragraph('FGR-Normal')
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('FGR-TextContents')
        self.doc.start_paragraph('FGR-Normal')
        self.doc.write_text(name)
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('FGR-TextContents')
        self.doc.start_paragraph('FGR-Normal')
        self.doc.write_text(date)
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('FGR-TextContentsEnd')
        self.doc.start_paragraph('FGR-Normal')
        self.doc.write_text(place)
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()
def dump_child(self,index,person_handle):
person = self.database.get_person_from_handle(person_handle)
families = len(person.get_family_handle_list())
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.database.get_event_from_handle(birth_ref.ref)
else:
birth = None
death_ref = person.get_death_ref()
if death_ref:
death = self.database.get_event_from_handle(death_ref.ref)
else:
death = None
spouse_count = 0;
if self.incChiMar:
for family_handle in person.get_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
spouse_id = None
if person_handle == family.get_father_handle():
spouse_id = family.get_mother_handle()
else:
spouse_id = family.get_father_handle()
if spouse_id:
spouse_count += 1
self.doc.start_row()
if spouse_count != 0 or self.missingInfo or death is not None or birth is not None:
self.doc.start_cell('FGR-TextChild1')
else:
self.doc.start_cell('FGR-TextChild2')
self.doc.start_paragraph('FGR-ChildText')
index_str = ("%d" % index)
if person.get_gender() == Person.MALE:
self.doc.write_text(index_str + self._("acronym for male|M"))
elif person.get_gender() == Person.FEMALE:
self.doc.write_text(index_str + self._("acronym for female|F"))
else:
self.doc.write_text(self._("acronym for unknown|%dU") % index)
self.doc.end_paragraph()
self.doc.end_cell()
name = self._name_display.display(person)
mark = ReportUtils.get_person_mark(self.database,person)
self.doc.start_cell('FGR-ChildName',3)
self.doc.start_paragraph('FGR-ChildText')
self.doc.write_text(name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if self.missingInfo or birth is not None:
if spouse_count != 0 or self.missingInfo or death is not None:
self.dump_child_event('FGR-TextChild1',self._('Birth'),birth)
else:
self.dump_child_event('FGR-TextChild2',self._('Birth'),birth)
if self.missingInfo or death is not None:
if spouse_count == 0 or not self.incChiMar:
self.dump_child_event('FGR-TextChild2',self._('Death'),death)
else:
self.dump_child_event('FGR-TextChild1',self._('Death'),death)
if self.incChiMar:
index = 0
for family_handle in person.get_family_handle_list():
m = None
index += 1
family = self.database.get_family_from_handle(family_handle)
for event_ref in family.get_event_ref_list():
if event_ref:
event = self.database.get_event_from_handle(event_ref.ref)
if event.type == EventType.MARRIAGE:
m = event
break
spouse_id = None
if person_handle == family.get_father_handle():
spouse_id = family.get_mother_handle()
else:
spouse_id = family.get_father_handle()
if spouse_id:
self.doc.start_row()
if m or index != families:
self.doc.start_cell('FGR-TextChild1')
else:
self.doc.start_cell('FGR-TextChild2')
self.doc.start_paragraph('FGR-Normal')
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContents')
self.doc.start_paragraph('FGR-Normal')
self.doc.write_text(self._("Spouse"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FGR-TextContentsEnd',2)
self.doc.start_paragraph('FGR-Normal')
spouse = self.database.get_person_from_handle(spouse_id)
spouse_name = self._name_display.display(spouse)
if self.incRelDates:
birth = " "
birth_ref = spouse.get_birth_ref()
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
birth = self._get_date(event.get_date_object())
death = " "
death_ref = spouse.get_death_ref()
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
death = self._get_date(event.get_date_object())
if birth_ref or death_ref:
spouse_name = "%s (%s - %s)" % (spouse_name,birth,death)
mark = ReportUtils.get_person_mark(self.database,spouse)
self.doc.write_text(spouse_name,mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if m:
evtName = self._("Marriage")
if index == families:
self.dump_child_event('FGR-TextChild2',evtName,m)
else:
self.dump_child_event('FGR-TextChild1',evtName,m)
    def dump_family(self,family_handle,generation):
        """Write one complete family group page; recurse into descendants.

        family_handle - handle of the family to dump
        generation    - 1-based generation counter, shown in the title when
                        both self.recursive and self.generations are set
        """
        self.doc.start_paragraph('FGR-Title')
        if self.recursive and self.generations:
            title = self._("Family Group Report - Generation %d") % generation
        else:
            title = self._("Family Group Report")
        mark = IndexMark(title, INDEX_TYPE_TOC,1)
        self.doc.write_text( title, mark )
        self.doc.end_paragraph()
        family = self.database.get_family_from_handle(family_handle)
        # Husband table, optional marriage table, wife table -- separated
        # by blank paragraphs.
        self.dump_parent(self._("Husband"),family.get_father_handle())
        self.doc.start_paragraph("FGR-blank")
        self.doc.end_paragraph()
        if self.incParMar:
            self.dump_marriage(family)
        self.doc.start_paragraph("FGR-blank")
        self.doc.end_paragraph()
        self.dump_parent(self._("Wife"),family.get_mother_handle())
        length = len(family.get_child_ref_list())
        if length > 0:
            self.doc.start_paragraph("FGR-blank")
            self.doc.end_paragraph()
            self.doc.start_table('FGR-Children','FGR-ChildTable')
            self.doc.start_row()
            self.doc.start_cell('FGR-ParentHead',4)
            self.doc.start_paragraph('FGR-ParentName')
            self.doc.write_text(self._("Children"))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()
            index = 1
            for child_ref in family.get_child_ref_list():
                self.dump_child(index,child_ref.ref)
                index += 1
            self.doc.end_table()
        if self.recursive:
            # One page per descendant family; skip the family we just
            # printed so a child's own family does not repeat it.
            for child_ref in family.get_child_ref_list():
                child = self.database.get_person_from_handle(child_ref.ref)
                for child_family_handle in child.get_family_handle_list():
                    if child_family_handle != family_handle:
                        self.doc.page_break()
                        self.dump_family(child_family_handle,(generation+1))
def write_report(self):
if self.family_handle:
self.dump_family(self.family_handle,1)
else:
self.doc.start_paragraph('FGR-Title')
self.doc.write_text(self._("Family Group Report"))
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# MenuReportOptions
#
#------------------------------------------------------------------------
class FamilyGroupOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
    def __init__(self, name, dbase):
        """Initialize the report options, delegating to MenuReportOptions."""
        MenuReportOptions.__init__(self, name, dbase)
    def add_menu_options(self, menu):
        """Add the report's options to *menu*, grouped into three categories:
        Report Options, Include, and Missing Information.

        Option names here must match the get_value() keys used in
        FamilyGroup.__init__.
        """
        ##########################
        category_name = _("Report Options")
        add_option = partial(menu.add_option, category_name)
        ##########################
        family_id = FamilyOption(_("Center Family"))
        family_id.set_help(_("The center family for the report"))
        add_option("family_id", family_id)
        stdoptions.add_name_format_option(menu, category_name)
        recursive = BooleanOption(_('Recursive'),False)
        recursive.set_help(_("Create reports for all descendants "
                             "of this family."))
        add_option("recursive", recursive)
        stdoptions.add_private_data_option(menu, category_name)
        stdoptions.add_localization_option(menu, category_name)
        ##########################
        add_option = partial(menu.add_option, _("Include"))
        ##########################
        generations = BooleanOption(_("Generation numbers "
                                      "(recursive only)"),True)
        generations.set_help(_("Whether to include the generation on each "
                               "report (recursive only)."))
        add_option("generations", generations)
        incParEvents = BooleanOption(_("Parent Events"),False)
        incParEvents.set_help(_("Whether to include events for parents."))
        add_option("incParEvents", incParEvents)
        incParAddr = BooleanOption(_("Parent Addresses"),False)
        incParAddr.set_help(_("Whether to include addresses for parents."))
        add_option("incParAddr", incParAddr)
        incParNotes = BooleanOption(_("Parent Notes"),False)
        incParNotes.set_help(_("Whether to include notes for parents."))
        add_option("incParNotes", incParNotes)
        incattrs = BooleanOption(_("Parent Attributes"),False)
        incattrs.set_help(_("Whether to include attributes."))
        add_option("incattrs", incattrs)
        incParNames = BooleanOption(_("Alternate Parent Names"),False)
        incParNames.set_help(_("Whether to include alternate "
                               "names for parents."))
        add_option("incParNames", incParNames)
        incParMar = BooleanOption(_("Parent Marriage"),False)
        incParMar.set_help(_("Whether to include marriage information "
                             "for parents."))
        add_option("incParMar", incParMar)
        incRelDates = BooleanOption(_("Dates of Relatives"),False)
        incRelDates.set_help(_("Whether to include dates for relatives "
                               "(father, mother, spouse)."))
        add_option("incRelDates", incRelDates)
        incChiMar = BooleanOption(_("Children Marriages"),True)
        incChiMar.set_help(_("Whether to include marriage information "
                             "for children."))
        add_option("incChiMar", incChiMar)
        ##########################
        add_option = partial(menu.add_option, _("Missing Information"))
        ##########################
        missinginfo = BooleanOption(_("Print fields for missing "
                                      "information"),True)
        missinginfo.set_help(_("Whether to include fields for missing "
                               "information."))
        add_option("missinginfo", missinginfo)
    def make_default_style(self,default_style):
        """Make default output style for the Family Group Report."""
        para = ParagraphStyle()
        #Paragraph Styles
        # Tiny font: the blank separator paragraph between tables.
        font = FontStyle()
        font.set_size(4)
        para.set_font(font)
        default_style.add_paragraph_style('FGR-blank',para)
        font = FontStyle()
        font.set_type_face(FONT_SANS_SERIF)
        font.set_size(16)
        font.set_bold(1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_header_level(1)
        para.set_description(_("The style used for the title of the page."))
        default_style.add_paragraph_style('FGR-Title',para)
        font = FontStyle()
        font.set_type_face(FONT_SERIF)
        font.set_size(10)
        font.set_bold(0)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_description(_('The basic style used for the text display.'))
        default_style.add_paragraph_style('FGR-Normal',para)
        para = ParagraphStyle()
        font = FontStyle()
        font.set_type_face(FONT_SERIF)
        font.set_size(10)
        font.set_bold(0)
        para.set_font(font)
        para.set(lmargin=0.0)
        para.set_top_margin(0.0)
        para.set_bottom_margin(0.0)
        para.set_description(_('The basic style used for the note display.'))
        default_style.add_paragraph_style("FGR-Note",para)
        font = FontStyle()
        font.set_type_face(FONT_SANS_SERIF)
        font.set_size(10)
        font.set_bold(1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_description(_('The style used for the text related to the children.'))
        default_style.add_paragraph_style('FGR-ChildText',para)
        font = FontStyle()
        font.set_type_face(FONT_SANS_SERIF)
        font.set_size(12)
        font.set_bold(1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_header_level(3)
        para.set_description(_("The style used for the parent's name"))
        default_style.add_paragraph_style('FGR-ParentName',para)
        #Table Styles
        # Section header cell: fully bordered.
        cell = TableCellStyle()
        cell.set_padding(0.2)
        cell.set_top_border(1)
        cell.set_bottom_border(1)
        cell.set_right_border(1)
        cell.set_left_border(1)
        default_style.add_cell_style('FGR-ParentHead',cell)
        cell = TableCellStyle()
        cell.set_padding(0.1)
        cell.set_bottom_border(1)
        cell.set_left_border(1)
        default_style.add_cell_style('FGR-TextContents',cell)
        # Child1/Child2 differ only in the bottom border: Child1 draws it,
        # Child2 does not (used to close/continue a child's visual box).
        cell = TableCellStyle()
        cell.set_padding(0.1)
        cell.set_bottom_border(0)
        cell.set_left_border(1)
        cell.set_padding(0.1)
        default_style.add_cell_style('FGR-TextChild1',cell)
        cell = TableCellStyle()
        cell.set_padding(0.1)
        cell.set_bottom_border(1)
        cell.set_left_border(1)
        cell.set_padding(0.1)
        default_style.add_cell_style('FGR-TextChild2',cell)
        cell = TableCellStyle()
        cell.set_padding(0.1)
        cell.set_bottom_border(1)
        cell.set_right_border(1)
        cell.set_left_border(1)
        default_style.add_cell_style('FGR-TextContentsEnd',cell)
        cell = TableCellStyle()
        cell.set_padding(0.2)
        cell.set_bottom_border(1)
        cell.set_right_border(1)
        cell.set_left_border(1)
        default_style.add_cell_style('FGR-ChildName',cell)
        table = TableStyle()
        table.set_width(100)
        table.set_columns(3)
        table.set_column_width(0,20)
        table.set_column_width(1,40)
        table.set_column_width(2,40)
        default_style.add_table_style('FGR-ParentTable',table)
        table = TableStyle()
        table.set_width(100)
        table.set_columns(4)
        table.set_column_width(0,7)
        table.set_column_width(1,18)
        table.set_column_width(2,35)
        table.set_column_width(3,40)
        default_style.add_table_style('FGR-ChildTable',table)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/textreport/familygroup.py
|
Python
|
gpl-2.0
| 33,501
|
[
"Brian"
] |
bba2debd7882655c963ea4a153768c142ce7f95841fde1bc2b8d48306e41f0d9
|
try:
    # Prefer the C-accelerated profiler when available.
    import cProfile as myprofiler
except ImportError:
    # Was a bare `except:`, which would also swallow KeyboardInterrupt
    # and SystemExit; only an import failure should trigger the fallback.
    import profile as myprofiler

import orca.orca

# Profile a full Orca run, writing the stats to the 'orcaprof' file
# (inspect later with pstats).
myprofiler.run('orca.orca.main()', 'orcaprof')
|
pvagner/orca
|
test/harness/runprofiler.py
|
Python
|
lgpl-2.1
| 144
|
[
"ORCA"
] |
df2e9b7770d776fc6cdd86ede4bced7a26d27787fd615e14e90d98e54733041e
|
import telebot
from telebot.types import LabeledPrice, ShippingOption

# Bot token from @BotFather; provider token from Bot Settings -> Payments.
token = '1234567890:AAAABBBBCCCCDDDDeeeeFFFFgggGHHHH'
provider_token = '1234567890:TEST:AAAABBBBCCCCDDDD'  # @BotFather -> Bot Settings -> Payments

bot = telebot.TeleBot(token)

# More about Payments: https://core.telegram.org/bots/payments

# Amounts are in the currency's smallest unit (cents for USD).
prices = [LabeledPrice(label='Working Time Machine', amount=5750), LabeledPrice('Gift wrapping', 500)]

shipping_options = [
    ShippingOption(id='instant', title='WorldWide Teleporter').add_price(LabeledPrice('Teleporter', 1000)),
    ShippingOption(id='pickup', title='Local pickup').add_price(LabeledPrice('Pickup', 300))]


# /start: greet and point at the /buy and /terms commands.
@bot.message_handler(commands=['start'])
def command_start(message):
    bot.send_message(message.chat.id,
                     "Hello, I'm the demo merchant bot."
                     " I can sell you a Time Machine."
                     " Use /buy to order one, /terms for Terms and Conditions")


# /terms: static terms-and-conditions text.
@bot.message_handler(commands=['terms'])
def command_terms(message):
    bot.send_message(message.chat.id,
                     'Thank you for shopping with our demo bot. We hope you like your new time machine!\n'
                     '1. If your time machine was not delivered on time, please rethink your concept of time and try again.\n'
                     '2. If you find that your time machine is not working, kindly contact our future service workshops on Trappist-1e.'
                     ' They will be accessible anywhere between May 2075 and November 4000 C.E.\n'
                     '3. If you would like a refund, kindly apply for one yesterday and we will have sent it to you immediately.')


# /buy: send the invoice (test card hint first, then the invoice itself).
@bot.message_handler(commands=['buy'])
def command_pay(message):
    bot.send_message(message.chat.id,
                     "Real cards won't work with me, no money will be debited from your account."
                     " Use this test card number to pay for your Time Machine: `4242 4242 4242 4242`"
                     "\n\nThis is your demo invoice:", parse_mode='Markdown')
    bot.send_invoice(message.chat.id, title='Working Time Machine',
                     description='Want to visit your great-great-great-grandparents?'
                                 ' Make a fortune at the races?'
                                 ' Shake hands with Hammurabi and take a stroll in the Hanging Gardens?'
                                 ' Order our Working Time Machine today!',
                     provider_token=provider_token,
                     currency='usd',
                     photo_url='http://erkelzaar.tsudao.com/models/perrotta/TIME_MACHINE.jpg',
                     photo_height=512,  # !=0/None or picture won't be shown
                     photo_width=512,
                     photo_size=512,
                     is_flexible=False,  # True If you need to set up Shipping Fee
                     prices=prices,
                     start_parameter='time-machine-example',
                     invoice_payload='HAPPY FRIDAYS COUPON')


# Shipping query: offer the predefined shipping options.
@bot.shipping_query_handler(func=lambda query: True)
def shipping(shipping_query):
    print(shipping_query)
    bot.answer_shipping_query(shipping_query.id, ok=True, shipping_options=shipping_options,
                              error_message='Oh, seems like our Dog couriers are having a lunch right now. Try again later!')


# Pre-checkout must be answered within 10 seconds or the payment fails.
@bot.pre_checkout_query_handler(func=lambda query: True)
def checkout(pre_checkout_query):
    bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True,
                                  error_message="Aliens tried to steal your card's CVV, but we successfully protected your credentials,"
                                                " try to pay again in a few minutes, we need a small rest.")


# Successful payment notification: thank the user.
@bot.message_handler(content_types=['successful_payment'])
def got_payment(message):
    # total_amount is in the smallest currency unit, hence the /100.
    bot.send_message(message.chat.id,
                     'Hoooooray! Thanks for payment! We will proceed your order for `{} {}` as fast as possible! '
                     'Stay in touch.\n\nUse /buy again to get a Time Machine for your friend!'.format(
                         message.successful_payment.total_amount / 100, message.successful_payment.currency),
                     parse_mode='Markdown')


bot.infinity_polling(skip_pending = True)
|
eternnoir/pyTelegramBotAPI
|
examples/payments_example.py
|
Python
|
gpl-2.0
| 4,286
|
[
"VisIt"
] |
39748b63ee617b21b0ab3d82888021afeabbbc58fcb234f54de64fe30050112f
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import time
import sys
import os
import gc
import shutil
import csv
import tempfile
import threading
# try to import the resource module. We check further down if it failed
try:
import resource
except:
pass
from TestHarness.testers.Tester import Tester
def process_timeout(proc, timeout_sec):
    """Wait for *proc* to exit, killing it after *timeout_sec* seconds.

    proc        - a subprocess.Popen instance
    timeout_sec - watchdog delay in seconds
    """
    watchdog = threading.Timer(timeout_sec, lambda victim: victim.kill(), [proc])
    try:
        watchdog.start()
        proc.wait()
    finally:
        # Always disarm the watchdog so a finished process cannot be
        # killed later (and the timer thread does not linger).
        watchdog.cancel()
class Test:
    """One benchmark case: a MOOSE executable plus input file, run with all
    output suppressed so the timing reflects the solve itself."""

    def __init__(self, executable, infile, rootdir='.', args=None, perflog=False):
        self.rootdir = rootdir
        self.executable = executable
        self.infile = infile
        self.args = args
        self.dur_secs = 0          # child CPU (user) time of the last run
        self.perflog = []          # rows parsed from perflog.csv, if enabled
        # Disable every output so file I/O does not pollute the timing.
        self.getpot_options = ['Outputs/console=false', 'Outputs/exodus=false', 'Outputs/csv=false', '--no-gdb-backtrace']
        self.have_perflog = perflog
        if self.have_perflog:
            self.getpot_options.append('UserObjects/perflog/type=PerflogDumper')

    def _buildcmd(self):
        """Assemble the command line; pin to an isolated CPU when Linux
        cpu isolation is configured (reduces timing jitter)."""
        cmdpath = self.executable
        infilepath = os.path.abspath(os.path.join(self.rootdir, self.infile))
        cmd = [cmdpath, '-i', infilepath]
        if self.args is not None:
            cmd.extend(self.args)
        cmd.extend(self.getpot_options)

        # check for linux cpu isolation
        isolpath = '/sys/devices/system/cpu/isolated'
        cpuid = None
        if os.path.exists(isolpath):
            with open(isolpath, 'r') as f:
                cpus = f.read().split(',')
                if len(cpus[0].strip()) > 0:
                    cpuid = cpus[0]
        if cpuid:
            cmd = ['taskset', '-c', cpuid] + cmd
        return cmd

    def reset(self):
        """Clear results from any previous run."""
        self.perflog = []
        self.dur_secs = 0

    def run(self, timer=None, timeout=300):
        """Run the benchmark once in a scratch dir and record CPU time.

        timer   - optional object with start()/stop() hooks around the run
        timeout - seconds before the child is killed (see process_timeout)

        Raises RuntimeError on a nonzero exit code (including a kill).
        NOTE(review): 'resource' is only imported on platforms that provide
        it (guarded import at the top of this file) -- this method will
        NameError elsewhere.
        """
        self.reset()
        cmd = self._buildcmd()

        # Fresh scratch dir for the run's outputs (perflog.csv lands here).
        tmpdir = tempfile.mkdtemp()
        shutil.rmtree(tmpdir, ignore_errors=True)
        os.makedirs(tmpdir)

        # Child CPU user time, measured as a delta across the run; GC is
        # disabled so a collection does not perturb the wall-clock hooks.
        rusage = resource.getrusage(resource.RUSAGE_CHILDREN)
        start = rusage.ru_utime
        gc.disable()
        with open(os.devnull, 'w') as devnull:
            if timer:
                timer.start()
            p = subprocess.Popen(cmd, cwd=tmpdir, stdout=devnull, stderr=devnull)
            process_timeout(p, timeout)
            if timer:
                timer.stop()
        gc.enable()
        rusage = resource.getrusage(resource.RUSAGE_CHILDREN)
        end = rusage.ru_utime

        if p.returncode != 0:
            raise RuntimeError('command {} returned nonzero exit code'.format(cmd))

        self.dur_secs = end - start

        # write perflog
        if self.have_perflog:
            with open(os.path.join(tmpdir, 'perflog.csv'), 'r') as csvfile:
                reader = csv.reader(csvfile)
                skip = True # use to skip header line
                for row in reader:
                    if not skip:
                        self.perflog.append(row)
                    else:
                        skip = False
        shutil.rmtree(tmpdir)
class SpeedTest(Tester):
    """TestHarness Tester that runs an input file as a timed benchmark.

    When the harness is invoked without the 'speedtests' tag, the test
    degrades to a single --check-input run so broken inputs are still caught.
    """

    @staticmethod
    def validParams():
        params = Tester.validParams()
        params.addParam('input', 'The input file to use for this test.')
        params.addParam('test_name', 'The name of the test - populated automatically')
        params.addParam('cumulative_dur', 60, 'cumulative time (secs) to run each benchmark')
        params.addParam('min_runs', 40, 'minimum number of runs for each benchmark')
        params.addParam('max_runs', 400, 'maximum number of runs for each benchmark')
        params.addParam('perflog', False, 'true to enable perflog and store its output')
        return params

    def __init__(self, name, params):
        Tester.__init__(self, name, params)
        self.tags.append('speedtests')
        # wall-clock budget for the whole benchmark; never below one hour
        self.timeout = max(3600, float(params['max_time']))
        self.check_only = False    # set True in checkRunnable for input-only runs
        self.params = params
        self.benchmark = None
        # sqlite results file; overridable through the environment
        self.db = os.environ.get('MOOSE_SPEED_DB', 'speedtests.sqlite')

    # override
    def getMaxTime(self):
        return self.timeout

    # override
    def checkRunnable(self, options):
        # check if resource is available (the module-level import may have failed)
        if 'resource' not in sys.modules:
            return False
        # if user is not explicitly running benchmarks, we only run moose once and just check
        # input - to make sure the benchmark isn't broken.
        if 'speedtests' not in options.runtags:
            self.params['max_runs'] = 1
            self.params['cli_args'].insert(0, '--check-input')
            self.check_only = True
        return True

    # override
    def run(self, timer, options):
        p = self.params
        # timings are only meaningful on real build types
        if not self.check_only and options.method not in ['opt', 'oprof', 'dbg']:
            raise ValueError('cannot run benchmark with "' + options.method + '" build')
        t = Test(p['executable'], p['input'], args=p['cli_args'], rootdir=p['test_dir'], perflog=p['perflog'])
        if self.check_only:
            t.run(timer, timeout=p['max_time'])
            return
        name = p['test_name'].split('.')[-1]
        self.benchmark = Bench(name, test=t, cum_dur=float(p['cumulative_dur']), min_runs=int(p['min_runs']), max_runs=int(p['max_runs']))
        self.benchmark.run(timer, timeout=self.timeout)
        with DB(self.db) as db:
            db.store(self.benchmark)

    # override
    def processResults(self, moose_dir, options, output):
        # reaching this point without an exception counts as success
        self.setStatus(self.success)
        return output
class Bench:
    """A named collection of repeated Test runs (durations plus perflog rows)."""

    def __init__(self, name, realruns=None, test=None, cum_dur=60, min_runs=40, max_runs=400):
        self.name = name
        self.test = test
        self.realruns = list(realruns) if realruns is not None else []
        self.perflogruns = []
        self._cum_dur = cum_dur
        self._min_runs = min_runs
        self._max_runs = max_runs

    def run(self, timer=None, timeout=3600):
        """Repeat the test until min_runs and cum_dur are both satisfied (or max_runs hit)."""
        accumulated = 0.0
        began = time.time()
        while len(self.realruns) < self._max_runs:
            if len(self.realruns) >= self._min_runs and accumulated >= self._cum_dur:
                break
            elapsed = time.time() - began
            if elapsed >= timeout:
                raise RuntimeError('benchmark timed out after {} with {} runs'.format(elapsed, len(self.realruns)))
            self.test.run(timer, timeout=timeout - elapsed)
            self.realruns.append(self.test.dur_secs)
            self.perflogruns.append(self.test.perflog)
            accumulated += self.test.dur_secs
class BenchComp:
    """Statistical comparison between two Bench sample sets.

    Runs a two-sided Mann-Whitney U test on IQR-filtered run times; a change
    is considered significant when pvalue <= psig.
    """

    def __init__(self, oldbench, newbench, psig=0.01):
        self.name = oldbench.name
        self.psig = psig
        self.old = oldbench.realruns
        self.new = newbench.realruns
        self.iqr_old = _iqr(self.old)
        self.iqr_new = _iqr(self.new)

        # scipy is imported lazily so this module stays importable without it.
        from scipy.stats import mannwhitneyu
        self.u = None
        try:
            result = mannwhitneyu(self.iqr_old, self.iqr_new, alternative='two-sided')
            self.pvalue = result.pvalue
            # BUGFIX: this assignment used to live outside the try block, so a
            # failed test left `result` unbound and raised NameError here.
            self.u = result[0]
        except Exception:
            # mannwhitneyu raises (e.g. ValueError when all values are equal);
            # treat that as "no significant difference".
            self.pvalue = 1.0

        self.avg_old = float(sum(self.iqr_old)) / len(self.iqr_old)
        self.avg_new = float(sum(self.iqr_new)) / len(self.iqr_new)
        self.speed_change = (self.avg_new - self.avg_old) / self.avg_old

    @classmethod
    def header(cls, revold, revnew):
        """Return the table header labeling the two revisions being compared."""
        oldstr, newstr = revold, revnew
        # abbreviate long (e.g. full-sha) revisions
        if len(oldstr) > 12:
            oldstr = oldstr[:12]
        if len(newstr) > 12:
            newstr = newstr[:12]
        revstr = ' {} to {} '.format(oldstr, newstr)
        revstr = revstr.center(30, '-')
        return '' \
            + '--------------------------------{}--------------------------------'.format(revstr) \
            + '\n{:^30s} {:^15s} {:^15s} {:5s}'.format('benchmark', 'old (sec/run)', 'new (sec/run)', 'speedup (pvalue, nsamples)') \
            + '\n----------------------------------------------------------------------------------------------'

    @classmethod
    def footer(cls):
        return '----------------------------------------------------------------------------------------------'

    def __str__(self):
        """One table row; '~' marks a statistically insignificant change."""
        name = self.name
        if len(name) > 30:
            name = name[:27] + '...'
        if self.pvalue <= self.psig:
            return '{:>30s}: {:^15f} {:^15f} {:+5.1f}% (p={:.4f},n={}+{})'.format(name, self.avg_old, self.avg_new, self.speed_change*100, self.pvalue, len(self.iqr_old), len(self.iqr_new))
        else:
            return '{:>30s}: {:^15f} {:^15f} ~ (p={:.4f},n={}+{})'.format(name, self.avg_old, self.avg_new, self.pvalue, len(self.iqr_old), len(self.iqr_new))
def _iqr(a, frac=1000):
"""return elements of a within frac*iqr of the the interquartile range (inclusive)"""
import numpy
qup, qlow = numpy.percentile(a, [75 ,25])
iqr = qup - qlow
clean = []
for val in a:
if qlow - frac*iqr <= val and val <= qup + frac*iqr:
clean.append(val)
return clean
class DB:
    """sqlite-backed storage for benchmark metadata, timings and perflog rows."""

    def __init__(self, fname):
        CREATE_BENCH_TABLE = '''CREATE TABLE IF NOT EXISTS benchmarks
        (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            executable TEXT,
            executable_name TEXT,
            executable_method TEXT,
            input_file TEXT,
            timestamp INTEGER,
            revision TEXT,
            date INTEGER,
            load REAL
        );'''

        CREATE_TIMES_TABLE = '''CREATE TABLE IF NOT EXISTS timings
        (
            benchmark_id INTEGER,
            run INTEGER,
            realtime_secs REAL
        );'''

        CREATE_PERFLOG_TABLE = '''CREATE TABLE IF NOT EXISTS perflog
        (
            benchmark_id INTEGER,
            run INTEGER,
            field TEXT,
            subfield TEXT,
            exec_count INTEGER,
            self_time_secs REAL,
            cum_time_secs REAL
        );'''

        self.fname = fname
        # python might not have sqlite3 builtin, so do the import here so
        # that the TestHarness can always import this file
        import sqlite3
        self.conn = sqlite3.connect(fname)
        c = self.conn.cursor()
        c.execute(CREATE_BENCH_TABLE)
        c.execute(CREATE_TIMES_TABLE)
        c.execute(CREATE_PERFLOG_TABLE)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def revisions(self, method='opt'):
        """Return ([revision], [date]) for all stored benchmarks, oldest first."""
        c = self.conn.cursor()
        c.execute('SELECT revision,date FROM benchmarks WHERE executable_method=? GROUP BY revision ORDER BY date ASC', (method,))
        rows = c.fetchall()
        revs = [r[0] for r in rows]
        times = [r[1] for r in rows]
        return revs, times

    def bench_names(self, method='opt'):
        """Return the distinct benchmark names stored for the given build method."""
        c = self.conn.cursor()
        c.execute('SELECT DISTINCT name FROM benchmarks WHERE executable_method=?', (method,))
        return [r[0] for r in c.fetchall()]

    def list(self, revision, benchmark='', method='opt'):
        """Return (id, name, executable, input_file) rows whose revision contains *revision*."""
        c = self.conn.cursor()
        if benchmark == '':
            c.execute('SELECT id,name,executable,input_file FROM benchmarks WHERE INSTR(revision,?) AND executable_method=? ORDER BY date ASC', (revision, method))
        else:
            c.execute('SELECT id,name,executable,input_file FROM benchmarks WHERE INSTR(revision,?) AND name=? AND executable_method=? ORDER BY date ASC', (revision, benchmark, method))
        return c.fetchall()

    def load_times(self, bench_id):
        """Return the recorded per-run durations (seconds) for a benchmark id."""
        c = self.conn.cursor()
        c.execute('SELECT realtime_secs FROM timings WHERE benchmark_id=?', (bench_id,))
        return [float(ent[0]) for ent in c.fetchall()]

    def load(self, revision, bench_name, method='opt'):
        """loads and returns a Bench object for the given revision and benchmark name"""
        entries = self.list(revision, benchmark=bench_name, method=method)
        if len(entries) < 1:
            raise RuntimeError('load: no benchmark for revision="{}",bench_name="{}"'.format(revision, bench_name))
        b = entries[0]
        real = self.load_times(b[0])
        return Bench(b[1], test=Test(b[2], b[3]), realruns=real)

    def store(self, benchmark, rev=None):
        """stores a (run/executed) Bench in the database. if rev is None, git revision is retrieved from git"""
        ex = benchmark.test.executable
        # executables are named '<app>-<method>' (e.g. 'moose-opt')
        (ex_name, ex_method) = os.path.basename(ex).rsplit('-', 1)
        infile = benchmark.test.infile
        timestamp = time.time()
        date = timestamp
        if rev is None:
            if 'MOOSE_REVISION' in os.environ:
                rev = os.environ['MOOSE_REVISION']
            else:
                rev, date = git_revision()
        load = os.getloadavg()[0]

        c = self.conn.cursor()
        c.execute('INSERT INTO benchmarks (name,executable,executable_name,executable_method,input_file,timestamp,revision,date,load) VALUES (?,?,?,?,?,?,?,?,?)',
                  (benchmark.name, ex, ex_name, ex_method, infile, timestamp, rev, date, load))
        bench_id = c.lastrowid
        self.conn.commit()

        # BUGFIX: the run index was previously incremented between the timings
        # insert and the perflog inserts, so every perflog row was stored under
        # run i+1 instead of the run it actually belongs to.
        for i, (real, perflog) in enumerate(zip(benchmark.realruns, benchmark.perflogruns)):
            c.execute('INSERT INTO timings (benchmark_id, run, realtime_secs) VALUES (?,?,?)', (bench_id, i, real))
            for entry in perflog:
                cat, subcat, nruns, selftime, cumtime = entry
                c.execute('INSERT INTO perflog (benchmark_id, run, field, subfield, exec_count, self_time_secs, cum_time_secs) VALUES (?,?,?,?,?,?,?)',
                          (bench_id, i, cat, subcat, nruns, selftime, cumtime))
        return bench_id

    def close(self):
        self.conn.commit()
        self.conn.close()
def git_revision(dir='.'):
    """Return (commit_hash, unix_date_secs) of HEAD in *dir*.

    Raises RuntimeError when git exits nonzero.
    """
    # return hash and (unix secs since epoch) date
    cmd = ['git', 'log', '--date', 'raw', '--pretty=format:%H %ad', '-n', '1']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=dir)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('failed to retrieve git revision')
    # BUGFIX: Popen returns bytes on python3; str(bytes) produced "b'<hash>"
    # which corrupted the stored commit hash. Decode instead.
    fields = stdout.decode('utf-8').strip().split(' ')
    commit = fields[0]
    date = int(fields[1])
    return commit, date
|
harterj/moose
|
python/TestHarness/testers/bench.py
|
Python
|
lgpl-2.1
| 14,760
|
[
"MOOSE"
] |
6034a24db9b999549a5e90d877175488d978d52524a0da87289e227e292e28cd
|
from flask import jsonify, Blueprint, request, json
from werkzeug.datastructures import MultiDict
from projectp import socketio
from projectp import db
from projectp.auth import requires_auth
from projectp.visits.forms import DateForm
from projectp.visits.models import Visit
from .models import Location
locations = Blueprint('locations', __name__)
@locations.route('/')
def all():
    """Get all locations"""
    return jsonify(Location.serialize_list(Location.query.all()))
@locations.route('/<int:location_id>')
def status(location_id):
    """Get a location"""
    location = Location.query.get(location_id)
    if not location:
        return jsonify(message='Location {} not found.'.format(location_id), code=404), 404
    return jsonify(location.serialize())
@locations.route('/<int:location_id>/visits')
def visits(location_id):
    """Get all visits by location id; an empty JSON object when there are none."""
    visits = Visit.query.filter_by(location_id=location_id).all()
    # Removed a leftover debug print() and an unreachable trailing error
    # return: the truthy/empty checks already cover every value of `visits`.
    if visits:
        return jsonify(Visit.serialize_list(visits))
    return jsonify({})
@locations.route('/<int:location_id>/visits/<start>/<end>')
def visits_range(location_id, start, end):
    """Get all visits by location id in a certain period"""
    form = DateForm(MultiDict(request.view_args))
    if not form.validate():
        return jsonify(message='Form errors!', error=form.errors, code=400), 400
    visits = Visit.query \
        .filter_by(location_id=location_id) \
        .filter(Visit.start_time.between(start, end)) \
        .order_by(Visit.start_time) \
        .all()
    # BUGFIX: `visits` is a plain list and has no serialize_list() method;
    # use the Visit.serialize_list helper like the other routes do.
    return jsonify(Visit.serialize_list(visits))
@locations.route('/status', methods=['PUT'])
@requires_auth
def set_status():
    """Set the status of a location"""
    occupied = request.form.get('occupied')
    if occupied not in ['true', 'false']:
        return jsonify(message='Occupied should be either true or false, not {}.'.format(occupied), code=400), 400
    h = request.headers.get('Authorization')
    location = Location.query \
        .join(Location.token) \
        .filter_by(hash=h.replace('Bearer ', '')) \
        .first()
    # Robustness: a token matching no location previously crashed with
    # AttributeError when assigning `location.occupied`.
    if location is None:
        return jsonify(message='No location found for the provided token.', code=404), 404
    # convert true and false to True and False
    location.occupied = occupied == 'true'
    db.session.commit()
    i = location.serialize()
    # push the updated location to all connected websocket clients
    socketio.emit('location', json.dumps(i), broadcast=True)
    return jsonify(message='Location {} updated succesfully.'.format(location.id), code=200, location=location.serialize()), 200
|
Proj-P/project-p-api
|
projectp/locations/views.py
|
Python
|
mit
| 2,614
|
[
"VisIt"
] |
573fc7d2530d2c86e572384ab1ca12d04c1d6439f873b3e589957b7587d2d486
|
#!python
# coding=utf-8
from __future__ import division
import os
import json
import math
import shutil
import argparse
import calendar
import tempfile
from glob import glob
from pathlib import Path
from datetime import datetime
from collections import OrderedDict
import pandas as pd
import netCDF4 as nc4
from compliance_checker.runner import ComplianceChecker, CheckSuite
from pocean.cf import cf_safe_name
from pocean.utils import (
dict_update,
get_fill_value,
create_ncvar_from_series,
get_ncdata_from_series
)
from pocean.meta import MetaInterface
from pocean.dsg import (
IncompleteMultidimensionalTrajectory,
ContiguousRaggedTrajectoryProfile
)
from gutils import get_uv_data, get_profile_data, safe_makedirs, setup_cli_logger
from gutils.filters import process_dataset
from gutils.slocum import SlocumReader
import logging
logging.getLogger("urllib3").setLevel(logging.WARNING)
L = logging.getLogger(__name__)
class ProfileIdTypes(object):
    """Enumeration of strategies for assigning a profile_id to output files."""
    EPOCH = 1  # the profile's epoch timestamp
    COUNT = 2  # "count" from the output directory
    FRAME = 3  # "profile" column from the input dataframe
def read_attrs(config_path=None, template=None):
    """Build the attribute dict: template defaults overlaid with the deployment's
    instruments.json and deployment.json (highest precedence last).
    """
    def cfg_file(name):
        return os.path.join(config_path, name)

    template = template or 'trajectory'

    # *template* may be a direct path; otherwise resolve it in templates/,
    # falling back to the stock trajectory template.
    if os.path.isfile(template):
        default_attrs_path = template
    else:
        template_dir = os.path.join(os.path.dirname(__file__), 'templates')
        default_attrs_path = os.path.join(template_dir, '{}.json'.format(template))
        if not os.path.isfile(default_attrs_path):
            L.error("Template path {} not found, using defaults.".format(default_attrs_path))
            default_attrs_path = os.path.join(template_dir, 'trajectory.json')

    # Template defaults form the base layer.
    defaults = dict(MetaInterface.from_jsonfile(default_attrs_path))

    ins = {}
    deps = {}
    if config_path:
        ins_attrs_path = cfg_file("instruments.json")
        if os.path.isfile(ins_attrs_path):
            ins = dict(MetaInterface.from_jsonfile(ins_attrs_path))
        deps_attrs_path = cfg_file("deployment.json")
        if os.path.isfile(deps_attrs_path):
            deps = dict(MetaInterface.from_jsonfile(deps_attrs_path))

    # Update, highest precedence updates last
    return dict_update(dict_update(defaults, ins), deps)
def set_scalar_value(value, ncvar):
    """Assign a scalar to a netCDF variable, substituting the fill value for None/NaN."""
    missing = value is None or math.isnan(value)
    ncvar[:] = get_fill_value(ncvar) if missing else value
def set_profile_data(ncd, profile_txy, profile_index):
    """Write the scalar profile_* variables (time/lat/lon/id) into an open netCDF file."""
    variables = ncd.variables
    prof_t = variables['profile_time']

    timestamp = profile_txy.t
    # datetimes must first be converted into the variable's numeric time units
    if isinstance(timestamp, datetime):
        timestamp = nc4.date2num(
            timestamp,
            units=prof_t.units,
            calendar=getattr(prof_t, 'calendar', 'standard')
        )

    set_scalar_value(timestamp, prof_t)
    set_scalar_value(profile_txy.y, variables['profile_lat'])
    set_scalar_value(profile_txy.x, variables['profile_lon'])
    set_scalar_value(profile_index, variables['profile_id'])

    ncd.sync()
def set_uv_data(ncd, uv_txy):
    """Write the depth-averaged current scalars (time_uv/lat_uv/lon_uv/u/v)."""
    # The uv index should be the second row where v (originally m_water_vx) is not null
    variables = ncd.variables
    uv_t = variables['time_uv']

    timestamp = uv_txy.t
    if isinstance(timestamp, datetime):
        timestamp = nc4.date2num(
            timestamp,
            units=uv_t.units,
            calendar=getattr(uv_t, 'calendar', 'standard')
        )

    set_scalar_value(timestamp, uv_t)
    set_scalar_value(uv_txy.y, variables['lat_uv'])
    set_scalar_value(uv_txy.x, variables['lon_uv'])
    set_scalar_value(uv_txy.u, variables['u'])
    set_scalar_value(uv_txy.v, variables['v'])

    ncd.sync()
def set_extra_data(ncd, extras_df):
    """
    extras_df must have a single datetime index, all columns will be variables
    dimensioned by that index.
    """
    if extras_df.empty:
        return

    dims = ('extras',)
    flattened = extras_df.reset_index().drop(columns=['profile'])
    ncd.createDimension(dims[0], len(flattened))

    for column in flattened.columns:
        ncvar = create_ncvar_from_series(
            ncd,
            cf_safe_name(column),
            dims,
            flattened[column],
            zlib=True,
            complevel=1
        )
        ncvar[:] = get_ncdata_from_series(flattened[column], ncvar)
def get_geographic_attributes(profile):
    """Compute geospatial_* global attributes (bounds plus a WKT polygon)."""
    lat_min = round(profile.y.min(), 5)
    lat_max = round(profile.y.max(), 5)
    lon_min = round(profile.x.min(), 5)
    lon_max = round(profile.x.max(), 5)
    # NOTE(review): vertices are written "lat lon"; confirm downstream readers
    # expect that ordering rather than WKT's customary "lon lat".
    polygon_wkt = 'POLYGON ((' \
                  '{maxy:.6f} {minx:.6f}, ' \
                  '{maxy:.6f} {maxx:.6f}, ' \
                  '{miny:.6f} {maxx:.6f}, ' \
                  '{miny:.6f} {minx:.6f}, ' \
                  '{maxy:.6f} {minx:.6f}' \
                  '))'.format(
                      miny=lat_min,
                      maxy=lat_max,
                      minx=lon_min,
                      maxx=lon_max
                  )
    return {
        'attributes': {
            'geospatial_lat_min': lat_min,
            'geospatial_lat_max': lat_max,
            'geospatial_lon_min': lon_min,
            'geospatial_lon_max': lon_max,
            'geospatial_bounds': polygon_wkt
        }
    }
def get_vertical_attributes(profile):
    """Compute geospatial_vertical_* global attributes from the depth column."""
    z_lo = round(profile.z.min(), 6)
    z_hi = round(profile.z.max(), 6)
    return {
        'attributes': {
            'geospatial_vertical_min': z_lo,
            'geospatial_vertical_max': z_hi,
            'geospatial_vertical_units': 'm',
        }
    }
def get_temporal_attributes(profile):
    """Compute time_coverage_* global attributes from the time column."""
    t_begin = profile.t.min()
    t_end = profile.t.max()
    return {
        'attributes': {
            'time_coverage_start': t_begin.strftime('%Y-%m-%dT%H:%M:%SZ'),
            'time_coverage_end': t_end.strftime('%Y-%m-%dT%H:%M:%SZ'),
            # ISO-8601 duration; relies on pandas Timedelta.isoformat()
            'time_coverage_duration': (t_end - t_begin).isoformat(),
        }
    }
def get_creation_attributes(profile):
    """Build date_created/issued/modified and history global attributes (UTC now)."""
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    history = '{} - {}'.format(
        stamp,
        'Created with the GUTILS package: https://github.com/SECOORA/GUTILS'
    )
    return {
        'attributes': {
            'date_created': stamp,
            'date_issued': stamp,
            'date_modified': stamp,
            'history': history
        }
    }
def create_profile_netcdf(attrs, profile, output_path, mode, profile_id_type=ProfileIdTypes.EPOCH,
                          extras_df=None):
    """Write one profile DataFrame to its own netCDF file.

    Builds the file at a temp path, applies metadata from *attrs*, writes the
    profile/uv scalar variables, then moves the finished file into
    *output_path*. Returns the final file path.
    """
    if extras_df is None:
        extras_df = pd.DataFrame()

    try:
        # Path to hold file while we create it
        tmp_handle, tmp_path = tempfile.mkstemp(suffix='.nc', prefix='gutils_glider_netcdf_')

        profile_time = profile.t.dropna().iloc[0]

        if profile_id_type == ProfileIdTypes.EPOCH:
            # We are using the epoch as the profile_index!
            profile_index = calendar.timegm(profile_time.utctimetuple())
        # Figure out which profile index to use (epoch or integer)
        elif profile_id_type == ProfileIdTypes.COUNT:
            # Get all existing netCDF outputs and find out the index of this netCDF file. That
            # will be the profile_id of this file. This is effectively keeping a tally of netCDF
            # files that have been created and only works if NETCDF FILES ARE WRITTEN IN
            # ASCENDING ORDER.
            # There is a race condition here if files are being in parallel and one should be
            # sure that when this function is being run there can be no more files written.
            # This file being written is the last profile available.
            netcdf_files_same_mode = list(glob(
                os.path.join(
                    output_path,
                    '*_{}.nc'.format(mode)
                )
            ))
            profile_index = len(netcdf_files_same_mode)
        elif profile_id_type == ProfileIdTypes.FRAME:
            profile_index = profile.profile.iloc[0]
        else:
            raise ValueError('{} is not a valid profile type'.format(profile_id_type))

        # Create final filename
        filename = "{0}_{1:010d}_{2:%Y%m%dT%H%M%S}Z_{3}.nc".format(
            attrs['glider'],
            profile_index,
            profile_time,
            mode
        )
        output_file = os.path.join(output_path, filename)

        # Add in the trajectory dimension to make pocean happy
        traj_name = '{}-{}'.format(
            attrs['glider'],
            attrs['trajectory_date']
        )
        profile = profile.assign(trajectory=traj_name)

        # We add this back in later
        profile.drop('profile', axis=1, inplace=True)

        # Compute U/V scalar values
        uv_txy = get_uv_data(profile)
        if 'u_orig' in profile.columns and 'v_orig' in profile.columns:
            profile.drop(['u_orig', 'v_orig'], axis=1, inplace=True)

        # Compute profile scalar values
        profile_txy = get_profile_data(profile, method=None)

        # Calculate some geographic global attributes
        attrs = dict_update(attrs, get_geographic_attributes(profile))
        # Calculate some vertical global attributes
        attrs = dict_update(attrs, get_vertical_attributes(profile))
        # Calculate some temporal global attributes
        attrs = dict_update(attrs, get_temporal_attributes(profile))
        # Set the creation dates and history
        attrs = dict_update(attrs, get_creation_attributes(profile))

        # Changing column names here from the default 't z x y'
        axes = {
            't': 'time',
            'z': 'depth',
            'x': 'lon',
            'y': 'lat',
            'sample': 'time'
        }
        profile = profile.rename(columns=axes)

        # Use pocean to create NetCDF file
        with IncompleteMultidimensionalTrajectory.from_dataframe(
                profile,
                tmp_path,
                axes=axes,
                reduce_dims=True,
                mode='a') as ncd:

            # Set an extras data
            set_extra_data(ncd, extras_df)

            # We only want to apply metadata from the `attrs` map if the variable is already in
            # the netCDF file or it is a scalar variable (no shape defined). This avoids
            # creating measured variables that were not measured in this profile.
            prof_attrs = attrs.copy()

            vars_to_update = OrderedDict()
            for vname, vobj in prof_attrs['variables'].items():
                if vname in ncd.variables or ('shape' not in vobj and 'type' in vobj):
                    if 'shape' in vobj:
                        # Assign coordinates
                        vobj['attributes']['coordinates'] = '{} {} {} {}'.format(
                            axes.get('t'),
                            axes.get('z'),
                            axes.get('x'),
                            axes.get('y'),
                        )
                    vars_to_update[vname] = vobj
                else:
                    # L.debug("Skipping missing variable: {}".format(vname))
                    pass

            prof_attrs['variables'] = vars_to_update
            ncd.apply_meta(prof_attrs)

            # Set trajectory value
            ncd.id = traj_name
            ncd.variables['trajectory'][0] = traj_name

            # Set profile_* data
            set_profile_data(ncd, profile_txy, profile_index)

            # Set *_uv data
            set_uv_data(ncd, uv_txy)

        # Move to final destination
        safe_makedirs(os.path.dirname(output_file))
        os.chmod(tmp_path, 0o664)
        shutil.move(tmp_path, output_file)
        L.info('Created: {}'.format(output_file))
        return output_file
    except BaseException:
        # re-raise unchanged; the finally block still removes the temp file
        raise
    finally:
        # NOTE(review): if mkstemp itself raises, tmp_handle is unbound here
        # and this close would NameError — confirm / guard upstream.
        os.close(tmp_handle)
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
def change_datatype(data, c, attrs):
    """Cast column *c* of *data* to the dtype declared in the JSON config.

    Returns the converted Series, or None when no type is configured or the
    conversion fails. When the first cast raises (typically NaNs into an
    integer dtype), NaNs are filled with the configured _FillValue — either a
    bare scalar or a {'data': value} mapping — and the cast is retried.
    """
    var_attrs = attrs.get('variables', {}).get(c, {})
    ztype = var_attrs.get('type')
    if not ztype:
        return None

    try:
        return data[c].astype(ztype)
    except ValueError:
        fill = var_attrs.get('_FillValue')
        if fill is None:
            return None
        # BUGFIX: the fill value may be a bare scalar; the previous
        # `'data' in fill` membership test raised TypeError on non-mappings,
        # which escaped the ValueError handler.
        if isinstance(fill, dict) and 'data' in fill:
            fill = fill['data']
        try:
            return data[c].fillna(fill).astype(ztype)
        except ValueError:
            L.error("Could not covert {} to {}. Skipping {}.".format(c, ztype, c))
            return None
def create_netcdf(attrs, data, output_path, mode, profile_id_type=ProfileIdTypes.EPOCH,
                  subset=True, extras_df=None):
    """Create one netCDF file per profile found in *data*.

    Returns the list of files actually written; profiles that fail are logged
    and skipped.
    """
    if extras_df is None:
        extras_df = pd.DataFrame()

    for df in [data, extras_df]:
        # Optionally, remove any variables from the dataframe that do not have metadata assigned
        if subset is True:
            all_columns = set(df.columns)
            reserved_columns = [
                'trajectory',
                'profile',
                't',
                'x',
                'y',
                'z',
                'u_orig',
                'v_orig'
            ]
            removable_columns = all_columns - set(reserved_columns)
            orphans = removable_columns - set(attrs.get('variables', {}).keys())
            L.debug(
                "Excluded from output (absent from JSON config):\n * {}".format('\n * '.join(orphans))
            )
            df.drop(orphans, axis=1, inplace=True)

        # Change to the datatype defined in the JSON. This is so
        # all netCDF files have the same dtypes for the variables in the end
        for c in df.columns:
            changed = change_datatype(df, c, attrs)
            if changed is not None:
                df[c] = changed

    written = []
    for pi, profile in data.groupby('profile'):
        profile_extras = pd.DataFrame()
        if not extras_df.empty:
            profile_extras = extras_df.loc[extras_df.profile == pi]
        try:
            cr = create_profile_netcdf(attrs, profile, output_path, mode, profile_id_type,
                                       extras_df=profile_extras)
            written.append(cr)
        except BaseException:
            L.exception('Error creating netCDF for profile {}. Skipping.'.format(pi))
            continue

    # BUGFIX: results were accumulated in `written` but a separate,
    # always-empty `written_files` list was returned, so callers always saw [].
    return written
def create_arg_parser():
    """Build the argparse parser for the ASCII-to-netCDF conversion CLI."""
    ap = argparse.ArgumentParser(
        description='Parses a single combined ASCII file into a set of '
                    'NetCDFs file according to JSON configurations '
                    'for institution, deployment, glider, and datatypes.'
    )

    # positional arguments
    ap.add_argument(
        'file',
        help="Combined ASCII file to process into NetCDF"
    )
    ap.add_argument(
        'deployments_path',
        help='Path to folder containing all deployment config and for file output.'
    )

    # reader selection
    ap.add_argument(
        "-r",
        "--reader_class",
        help="Glider reader to interpret the data",
        default='slocum'
    )

    # profile filtering knobs (all optional; None means "use config value")
    ap.add_argument(
        '-ts', '--tsint',
        help="Interpolation window to consider when assigning profiles",
        default=None
    )
    ap.add_argument(
        '-fp', '--filter_points',
        help="Filter out profiles that do not have at least this number of points",
        default=None
    )
    ap.add_argument(
        '-fd', '--filter_distance',
        help="Filter out profiles that do not span at least this vertical distance (meters)",
        default=None
    )
    ap.add_argument(
        '-ft', '--filter_time',
        help="Filter out profiles that last less than this numer of seconds",
        default=None
    )
    ap.add_argument(
        '-fz', '--filter_z',
        help="Filter out profiles that are not completely below this depth (meters)",
        default=None
    )
    ap.add_argument(
        "-za",
        "--z_axis_method",
        help="1 == Calculate depth from pressure, 2 == Use raw depth values",
        default=1,
        type=int
    )

    # output shaping
    ap.add_argument(
        '--no-subset',
        dest='subset',
        action='store_false',
        help='Process all variables - not just those available in a datatype mapping JSON file'
    )
    ap.add_argument(
        "-t",
        "--template",
        help="The template to use when writing netCDF files. Options: None, [filepath], trajectory, ioos_ngdac",
        default='trajectory'
    )
    ap.set_defaults(subset=True)

    return ap
def create_dataset(file,
                   reader_class,
                   deployments_path,
                   subset,
                   template,
                   profile_id_type=ProfileIdTypes.EPOCH,
                   prefer_file_filters=False,
                   **filter_args
                   ):
    """Process one combined ASCII file into per-profile netCDF output.

    The output directory is derived from *file*'s location underneath
    *deployments_path*. Returns the list of files written, or 1 when the
    reader produced no data.

    BUGFIX: `profile_id_type` now defaults to ProfileIdTypes.EPOCH. It was a
    required argument, but main_create() never supplied it, so the CLI entry
    point crashed with a TypeError. The default is backward-compatible for
    callers that pass it explicitly.
    """
    # Remove None filters from the arguments
    filter_args = {k: v for k, v in filter_args.items() if v is not None}

    # Figure out the netCDF output path based on the file and the deployments_path.
    # Walk up from the file until we reach deployments_path; the last parent
    # visited before that is the individual deployment root.
    # NOTE(review): if `file` sits directly inside deployments_path this stays
    # None and the config lookup below fails — confirm expected layout.
    dep_path = Path(deployments_path)
    file_path = Path(file)
    individual_dep_path = None
    for pp in file_path.parents:
        if dep_path == pp:
            break
        individual_dep_path = pp
    config_path = individual_dep_path / 'config'

    # Extract the filters from the config and override with passed in filters that are not None
    attrs = read_attrs(config_path, template=template)
    file_filters = attrs.pop('filters', {})

    # By default the filters passed in as filter_args will overwrite the filters defined in the
    # config file. If the opposite should happen (typically on a watch that uses a global set
    # of command line filters), you can set prefer_file_filters=True to have the file filters
    # take precedence over the passed in filters.
    if prefer_file_filters is False:
        filters = dict_update(file_filters, filter_args)
    else:
        filters = dict_update(filter_args, file_filters)

    processed_df, extras_df, mode = process_dataset(file, reader_class, **filters)

    if processed_df is None:
        return 1

    output_path = individual_dep_path / mode / 'netcdf'

    return create_netcdf(attrs, processed_df, output_path, mode, profile_id_type,
                         subset=subset, extras_df=extras_df)
def main_create():
    """CLI entry point: parse arguments and convert one ASCII file to netCDF."""
    setup_cli_logger(logging.INFO)

    args = create_arg_parser().parse_args()
    options = vars(args)

    # Split out positional/structural arguments; whatever remains is treated
    # as filter keyword arguments.
    file = options.pop('file')
    deployments_path = options.pop('deployments_path')
    subset = options.pop('subset')
    template = options.pop('template')

    # Map the reader name to an actual reader class
    reader_class = options.pop('reader_class')
    if reader_class == 'slocum':
        reader_class = SlocumReader

    return create_dataset(
        file=file,
        reader_class=reader_class,
        deployments_path=deployments_path,
        subset=subset,
        template=template,
        **options
    )
# CHECKER
def check_dataset(args):
    """Run IOOS compliance checks (gliderdac:3.0) against args.file.

    Returns 0 when the file passes, 1 on failure or checker error. Check
    messages are logged at debug level on success, warning level on failure.
    """
    check_suite = CheckSuite()
    check_suite.load_all_available_checkers()

    # temp file receiving the checker's JSON report; removed in finally
    outhandle, outfile = tempfile.mkstemp()

    def show_messages(jn, log):
        # Flatten the checker's JSON results into one log call.
        out_messages = []
        for k, v in jn.items():
            if isinstance(v, list):
                for x in v:
                    if 'msgs' in x and x['msgs']:
                        out_messages += x['msgs']
        log(
            '{}:\n{}'.format(args.file, '\n'.join([' * {}'.format(
                m) for m in out_messages])
            )
        )

    try:
        return_value, errors = ComplianceChecker.run_checker(
            ds_loc=args.file,
            checker_names=['gliderdac:3.0'],
            verbose=2,
            criteria='lenient',
            skip_checks=[
                # This takes forever and hurts my CPU. Skip it.
                'check_standard_names:A',
            ],
            output_format='json',
            output_filename=outfile
        )
    except BaseException as e:
        # the checker itself blew up; report and flag failure
        L.warning('{} - {}'.format(args.file, e))
        return 1
    else:
        if errors is False:
            return_value = 0
            log = L.debug
        else:
            return_value = 1
            log = L.warning

        with open(outfile, 'rt') as f:
            show_messages(json.loads(f.read())['gliderdac:3.0'], log)

        return return_value
    finally:
        os.close(outhandle)
        if os.path.isfile(outfile):
            os.remove(outfile)
def check_arg_parser():
    """Build the argparse parser for the compliance-check CLI."""
    ap = argparse.ArgumentParser(
        description='Verifies that a glider NetCDF file from a provider '
                    'contains all the required global attributes, dimensions,'
                    'scalar variables and dimensioned variables.'
    )
    ap.add_argument(
        'file',
        help='Path to Glider NetCDF file.'
    )
    return ap
def main_check():
    """CLI entry point: compliance-check a single netCDF file."""
    setup_cli_logger(logging.INFO)

    args = check_arg_parser().parse_args()

    # Check filenames
    if args.file is None:
        raise ValueError('Must specify path to NetCDF file')

    return check_dataset(args)
def merge_profile_netcdf_files(folder, output):
    """Merge the per-profile netCDF files in *folder* into one contiguous
    ragged trajectory-profile file at *output*, applying the ioos_ngdac
    template metadata.
    """
    import pandas as pd
    from glob import glob

    # build in a temp location, then move into place; cleaned up in finally
    new_fp, new_path = tempfile.mkstemp(suffix='.nc', prefix='gutils_merge_')

    try:
        # Get the number of profiles
        members = sorted(list(glob(os.path.join(folder, '*.nc'))))

        # Iterate over the netCDF files and create a dataframe for each
        dfs = []
        axes = {
            'trajectory': 'trajectory',
            't': 'time',
            'x': 'lon',
            'y': 'lat',
            'z': 'depth',
        }
        for ncf in members:
            with IncompleteMultidimensionalTrajectory(ncf) as old:
                df = old.to_dataframe(axes=axes, clean_cols=False)
                dfs.append(df)

        full_df = pd.concat(dfs, ignore_index=True, sort=False)
        # deterministic row ordering before re-serialization
        full_df = full_df.sort_values(['trajectory', 'profile_id', 'profile_time', 'depth'])

        # Now add a profile axes
        axes = {
            'trajectory': 'trajectory',
            'profile': 'profile_id',
            't': 'profile_time',
            'x': 'profile_lon',
            'y': 'profile_lat',
            'z': 'depth',
        }
        newds = ContiguousRaggedTrajectoryProfile.from_dataframe(
            full_df,
            output=new_path,
            axes=axes,
            mode='a'
        )
        # Apply default metadata
        attrs = read_attrs(template='ioos_ngdac')
        newds.apply_meta(attrs, create_vars=False, create_dims=False)
        newds.close()

        safe_makedirs(os.path.dirname(output))
        shutil.move(new_path, output)
    finally:
        os.close(new_fp)
        if os.path.exists(new_path):
            os.remove(new_path)
def process_folder(deployment_path, mode, merger_class, reader_class, subset=True, template='trajectory', profile_id_type=ProfileIdTypes.EPOCH, workers=4, **filters):
    """Convert a deployment's binary files to ASCII, then create netCDF files
    for every ASCII file in parallel using a worker pool.
    """
    from multiprocessing import Pool

    binary_path = os.path.join(deployment_path, mode, 'binary')
    ascii_path = os.path.join(deployment_path, mode, 'ascii')

    # Make ASCII files
    merger = merger_class(
        binary_path,
        ascii_path
    )
    # The merge results contain a reference to the new produced ASCII file as well as what binary files went into it.
    merger.convert()

    asciis = sorted(
        [x.path for x in os.scandir(ascii_path) if Path(x).suffix in ['.dat']]
    )

    with Pool(processes=workers) as pool:
        kwargs = dict(
            # BUGFIX: this was hard-coded to SlocumReader, silently ignoring
            # the reader_class argument callers passed in.
            reader_class=reader_class,
            deployments_path=Path(str(deployment_path)).parent,
            subset=subset,
            template=template,
            profile_id_type=profile_id_type,
            **filters
        )
        multiple_results = [
            pool.apply_async(
                create_dataset, (), dict(file=x, **kwargs)
            ) for x in asciis
        ]
        print([res.get() for res in multiple_results])
|
SECOORA/GUTILS
|
gutils/nc.py
|
Python
|
mit
| 24,414
|
[
"NetCDF"
] |
2aa27aec19d65e876c5c1eb58f972b392fc7adf59af9218b9ef6f4461f06f6a1
|
#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import cgi
import MySQLdb
import dbShared
import ghShared
#
form = cgi.FieldStorage()
resGroup = form.getfirst('resGroup', '')
outType = form.getfirst('outType', '')
# escape input to prevent sql injection
resGroup = dbShared.dbInsertSafe(resGroup)
outType = dbShared.dbInsertSafe(outType)
print 'Content-type: text/html\n'
if outType == 'links':
print '<ul class="plain">'
else:
print '<option value="none" title="p00000000000">None</option>'
if len(resGroup) > 0:
criteriaStr = ' AND (resourceGroup = "' + resGroup + '" OR resourceCategory = "' + resGroup + '")'
else:
criteriaStr = ''
conn = dbShared.ghConn()
cursor = conn.cursor()
if (cursor):
cursor.execute('SELECT resourceType, resourceTypeName, CONCAT("p", CASE WHEN CRmax>0 THEN "1" ELSE "0" END, CASE WHEN CDmax>0 THEN "1" ELSE "0" END, CASE WHEN DRmax>0 THEN "1" ELSE "0" END, CASE WHEN FLmax>0 THEN "1" ELSE "0" END, CASE WHEN HRmax>0 THEN "1" ELSE "0" END, CASE WHEN MAmax>0 THEN "1" ELSE "0" END, CASE WHEN PEmax>0 THEN "1" ELSE "0" END, CASE WHEN OQmax>0 THEN "1" ELSE "0" END, CASE WHEN SRmax>0 THEN "1" ELSE "0" END, CASE WHEN UTmax>0 THEN "1" ELSE "0" END, CASE WHEN ERmax>0 THEN "1" ELSE "0" END) AS statMask FROM tResourceType WHERE enterable>0' + criteriaStr + ' ORDER BY resourceTypeName;')
row = cursor.fetchone()
if row == None and len(resGroup) > 0:
cursor.execute('select rgc.resourceGroup, rg.groupName, "p11111111111" AS statMask FROM tResourceGroupCategory rgc INNER JOIN tResourceGroup rg ON rgc.resourceGroup = rg.resourceGroup WHERE rgc.resourceCategory="' + resGroup + '";')
row = cursor.fetchone()
while (row != None):
if outType == 'links':
print '<li><a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/' + row[0] + '">' + row[1] + '</a></li>'
else:
print '<option value="'+str(row[0])+'" title="'+row[2]+'">'+row[1]+'</option>'
row = cursor.fetchone()
if outType == 'links':
print '</ul>'
|
druss316/G-Harvestor
|
html/getResourceTypeList.py
|
Python
|
gpl-3.0
| 2,692
|
[
"Galaxy"
] |
fb276a75328cb283ac5e21727c8276d2fe467dd362f1b34905dd5daf61335b45
|
"""Module to work with network in SIF format.
http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats#SIF_Format
"""
class SIFNode(object):
    """A node of a SIF network, keeping both edge directions."""

    def __init__(self, id):
        self.id = id        # node identifier string
        self.edges = []     # outgoing SIFEdge objects
        self.reverse = []   # incoming (mirrored) SIFEdge objects
class SIFEdge(object):
    """A typed, directed edge pointing at a target SIFNode."""

    def __init__(self, type, target):
        self.type = type      # interaction type string from the SIF file
        self.target = target  # SIFNode this edge points to
class SIFNetwork(object):
    """A directed graph read from a SIF (Simple Interaction Format) file.

    ``self.nodes`` maps node id -> SIFNode. Every edge is stored twice:
    forward on the source node's ``edges`` list and mirrored on the
    target node's ``reverse`` list, so the graph can be walked in both
    directions.

    NOTE(review): this class uses Python 2 constructs (``basestring``,
    ``itervalues``, ``iterkeys``) and will not run unmodified on Python 3.
    """
    def __init__(self):
        # node id -> SIFNode
        self.nodes = {}
    def parse(self, fp):
        """Parse a SIF file given as a path or an open file object.

        Each line has the form: ``source edge_type target [target ...]``.
        The file object is always closed afterwards, even when the caller
        passed it in already open.
        """
        if isinstance(fp, basestring):
            fp = open(fp, 'r')
        for l in fp:
            tmp = l.split()
            source = tmp[0]
            edge_type = tmp[1]
            # Get-or-create the source node.
            if source not in self.nodes:
                source_node = SIFNode(source)
                self.nodes[source] = source_node
            else:
                source_node = self.nodes[source]
            for target in tmp[2:]:
                # Get-or-create each target node.
                if target not in self.nodes:
                    target_node = SIFNode(target)
                    self.nodes[target] = target_node
                else:
                    target_node = self.nodes[target]
                edge = SIFEdge(edge_type, target_node)
                source_node.edges.append(edge)
                # Mirror edge so the graph can be walked backwards too.
                r_edge = SIFEdge(edge_type, source_node)
                target_node.reverse.append(r_edge)
        fp.close()
    def subnetwork(self, center_id, degree=1):
        """Find the subnetwork with up to a given degree neighbors,
        regardless of direction.
        """
        sub = self._subnetwork(center_id, degree)
        # _subnetwork copies edges only in the direction it walked them.
        # The loops below restore any missing complement (forward/reverse)
        # edges so the result keeps the same mirrored-edge invariant as a
        # freshly parsed network.
        for n in sub.nodes.itervalues():
            for edge in n.edges:
                complement_flag = False
                for r_edge in edge.target.reverse:
                    if n.id == r_edge.target.id and edge.type == r_edge.type:
                        complement_flag = True
                        break
                if not complement_flag:
                    edge.target.reverse.append(SIFEdge(edge.type, n))
            for edge in n.reverse:
                complement_flag = False
                for r_edge in edge.target.edges:
                    if n.id == r_edge.target.id and edge.type == r_edge.type:
                        complement_flag = True
                        break
                if not complement_flag:
                    edge.target.edges.append(SIFEdge(edge.type, n))
        return sub
    def _subnetwork(self, id, degree=1, sub=None, visited=None):
        """Recursive helper: copy nodes and edges reachable from ``id``
        within ``degree`` hops (following both directions) into ``sub``.

        Returns ``sub``, or None when the hop budget is exhausted or the
        node was already visited.
        """
        if degree == 0:
            return None
        if visited is None:
            visited = set()
        if id in visited:
            return None
        if sub is None:
            sub = SIFNetwork()
        # Get-or-create the copy of this node inside the subnetwork.
        if id not in sub.nodes:
            center = SIFNode(id)
            sub.nodes[id] = center
        else:
            center = sub.nodes[id]
        visited.add(id)
        for edge in self.nodes[id].edges:
            target_node = edge.target
            if target_node.id not in sub.nodes:
                new_target = SIFNode(target_node.id)
                sub.nodes[target_node.id] = new_target
            else:
                new_target = sub.nodes[target_node.id]
            center.edges.append(SIFEdge(edge.type, new_target))
            self._subnetwork(target_node.id, degree-1, sub, visited)
        for edge in self.nodes[id].reverse:
            target_node = edge.target
            if target_node.id not in sub.nodes:
                new_target = SIFNode(target_node.id)
                sub.nodes[target_node.id] = new_target
            else:
                new_target = sub.nodes[target_node.id]
            center.reverse.append(SIFEdge(edge.type, new_target))
            self._subnetwork(target_node.id, degree-1, sub, visited)
        return sub
    def split_components(self):
        """Return a list of connected components.
        For large network, need to raise recursion depth limit.
        Or rewrite in iterative way.
        """
        components = []
        visited = set()
        # Using the total node count as the hop budget guarantees each
        # traversal covers its entire component.
        degree = len(self.nodes.keys())
        for nid in self.nodes.iterkeys():
            if nid in visited:
                continue
            comp = self.subnetwork(nid, degree)
            visited = visited.union(comp.nodes.iterkeys())
            components.append(comp)
        return components
    def write(self, filename):
        """Write the network back out in SIF format, one edge per line."""
        with open(filename, 'w') as fp:
            for node in self.nodes.itervalues():
                for edge in node.edges:
                    fp.write('%s\t%s\t%s\n' %
                             (node.id, edge.type, edge.target.id))
    def to_json(self):
        """For use with Cytoscape.js"""
        # Local import keeps the dependency out of module import time.
        import json
        js = []
        for node in self.nodes.itervalues():
            js.append({'group': 'nodes',
                       'data': {'id': node.id}
                       })
            for edge in node.edges:
                js.append({'group': 'edges',
                           'data': {
                               'source': node.id,
                               'target': edge.target.id,
                               'type': edge.type
                           }
                           })
        return json.dumps(js)
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
tryptochan/domain_utils
|
SIF.py
|
Python
|
mit
| 5,228
|
[
"Cytoscape"
] |
5841858ba4e46b040697b79c6e42b0dd3ff39d186b03dff574b0fc2a098eb86a
|
"""
Step and impulse responses
==========================
These examples compare the analytical solution with `empymod` for time-domain
step and impulse responses for inline, x-directed source and receivers, for the
four different frequency-to-time methods **QWE**, **DLF**, **FFTLog**, and
**FFT**. Which method is faster and which is more precise depends on the model
(land or marine, source/receiver at air-interface or not) and the response
(step or impulse).
"""
import empymod
import numpy as np
from scipy.special import erf
import matplotlib.pyplot as plt
from scipy.constants import mu_0 # Permeability of free space [H/m]
# Use ggplot styling and capture matplotlib's default color cycle for reuse.
plt.style.use('ggplot')
colors = [color['color'] for color in list(plt.rcParams['axes.prop_cycle'])]
###############################################################################
# Analytical solutions
# --------------------
#
# Analytical solution for source and receiver at the interface between two
# half-spaces
#
# The time-domain step and impulse responses for a source at the origin
# (:math:`x_s = y_s = z_s = 0` m) and an in-line receiver at the surface
# (:math:`y_r = z_r = 0` m), is given by the following equations, where
# :math:`\rho_h` is horizontal resistivity (:math:`\Omega` m),
# :math:`\lambda` is anisotropy (-), with :math:`\lambda =
# \sqrt{\rho_v/\rho_h}`, :math:`r` is offset (m), :math:`t` is time (s), and
# :math:`\tau_h = \sqrt{\mu_0 r^2/(\rho_h t)}`; :math:`\mu_0` is the magnetic
# permeability of free space (H/m).
#
# Time Domain: Step Response :math:`\mathbf{\mathcal{H}(t)}`
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. math::
#
# E_x(\rho_h,\lambda,r,t) = \frac{\rho_h}{2 \pi r^3} \left[ 2\lambda +
# \rm{erf}\left(\frac{\tau_h}{2}\right) - 2\lambda
# \rm{erf}\left(\frac{\tau_h}{2\lambda}\right) + \frac{\tau_h}{\sqrt{\pi}}
# \exp\left(- \frac{\tau_h^2}{4\lambda^2}\right) \right]
#
# Time Domain: Impulse Response :math:`\mathbf{\delta(t)}`
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. math::
#
# \dot{E}_x(\rho_h,\lambda,r,t) =
# \frac{\rho_h}{2 \pi r^3} \left[ \delta(t) + \frac{\tau_h}{2t\sqrt{\pi}}
# \left\{ - \exp\left(-\frac{\tau_h^2}{4}\right) +
# \left( \frac{\tau_h^2}{2 \lambda^2} + 1 \right) \exp\left(-
# \frac{\tau_h^2}{4 \lambda^2}\right) \right\} \right]
#
# Reference
# ~~~~~~~~~
# Equations 3.2 and 3.3 in Werthmüller, D., 2009, Inversion of multi-transient
# EM data from anisotropic media: M.S. thesis, TU Delft, ETH Zürich, RWTH
# Aachen; UUID: `f4b071c1-8e55-4ec5-86c6-a2d54c3eda5a
# <http://repository.tudelft.nl/islandora/object/uuid:f4b071c1-8e55-4ec5-86c6-a2d54c3eda5a>`_.
#
# Analytical functions
# ~~~~~~~~~~~~~~~~~~~~
def ee_xx_impulse(res, aniso, off, time):
    """VTI-Halfspace impulse response, xx, inline.

    res : horizontal resistivity [Ohm.m]
    aniso : anisotropy [-]
    off : offset [m]
    time : time(s) [s]
    """
    tau_h = np.sqrt(mu_0*off**2/(res*time))
    # Common geometric factor and the two Gaussian terms of the solution.
    fact = res/(2*np.pi*off**3)
    scale = tau_h/(2*time*np.sqrt(np.pi))
    gauss_h = np.exp(-tau_h**2/4)
    gauss_v = np.exp(-tau_h**2/(4*aniso**2))
    poly = tau_h**2/(2*aniso**2) + 1
    Exx = fact*scale*(-gauss_h + poly*gauss_v)
    # Delta dirac part
    Exx[time == 0] = fact
    return Exx
def ee_xx_step(res, aniso, off, time):
    """VTI-Halfspace step response, xx, inline.

    res : horizontal resistivity [Ohm.m]
    aniso : anisotropy [-]
    off : offset [m]
    time : time(s) [s]
    """
    tau_h = np.sqrt(mu_0*off**2/(res*time))
    # Error-function terms plus the exponential tail of the solution.
    erf_h = erf(tau_h/2)
    erf_v = 2*aniso*erf(tau_h/(2*aniso))
    tail = tau_h/np.sqrt(np.pi)*np.exp(-tau_h**2/(4*aniso**2))
    return res/(2*np.pi*off**3)*(2*aniso + erf_h - erf_v + tail)
###############################################################################
# Example 1: Source and receiver at z=0m
# --------------------------------------
#
# Comparison with analytical solution; put 1 mm below the interface, as they
# would be regarded as in the air by `emmod` otherwise.
src = [0, 0, 0.001]  # Source at origin, slightly below interface
rec = [6000, 0, 0.001]  # Receivers in-line, 1 mm below interface
res = [2e14, 10]  # Resistivity: [air, half-space]
aniso = [1, 2]  # Anisotropy: [air, half-space]
eperm = [0, 1]  # Set el. perm. of air to 0 because of num. noise
t = np.logspace(-2, 1, 301)  # Desired times (s)
# Collect parameters shared by all frequency-to-time methods below
inparg = {'src': src, 'rec': rec, 'depth': 0, 'freqtime': t, 'res': res,
          'aniso': aniso, 'epermH': eperm, 'ht': 'dlf', 'verb': 2}
###############################################################################
# Impulse response
# ~~~~~~~~~~~~~~~~
ex = ee_xx_impulse(res[1], aniso[1], rec[0], t)  # analytical reference
inparg['signal'] = 0  # signal 0 = impulse
print('QWE')
qwe = empymod.dipole(**inparg, ft='qwe')
print('DLF (Sine)')
sin = empymod.dipole(**inparg, ft='dlf', ftarg={'dlf': 'key_81_CosSin_2009'})
print('FFTLog')
ftl = empymod.dipole(**inparg, ft='fftlog')
print('FFT')
fft = empymod.dipole(
    **inparg, ft='fft',
    ftarg={'dfreq': .0005, 'nfreq': 2**20, 'pts_per_dec': 10})
###############################################################################
# => `FFTLog` is the fastest by quite a margin, followed by the `Sine`-filter.
# What you cannot see from the output (set `verb` to something bigger than 2 to see
# it) is how many frequencies each method used:
#
# - QWE: 159 (0.000794328 - 63095.7 Hz)
# - Sine: 116 (5.33905E-06 - 52028 Hz)
# - FFTLog: 60 (0.000178575 - 141.847 Hz)
# - FFT: 61 (0.0005 - 524.288 Hz)
#
# Note that for the actual transform, `FFT` used 2^20 = 1'048'576 frequencies!
# It only computed 60 frequencies, and then interpolated the rest, as it
# requires regularly spaced data.
plt.figure()
plt.title(r'Impulse response for HS-model, $r=$' +
          str(int(rec[0]/1000)) + ' km.')
plt.xlabel('Time (s)')
plt.ylabel(r'Amplitude (V/m)')
plt.semilogx(t, ex, 'k-', label='Analytical')
plt.semilogx(t, qwe, 'C0-', label='QWE')
plt.semilogx(t, sin, 'C1--', label='Sine Filter')
plt.semilogx(t, ftl, 'C2-.', label='FFTLog')
plt.semilogx(t, fft, 'C3:', label='FFT')
plt.legend(loc='best')
plt.ylim([-.1*np.max(ex), 1.1*np.max(ex)])
plt.show()
###############################################################################
plt.figure()
plt.title('Error')
plt.xlabel('Time (s)')
plt.ylabel('Relative error (-)')
# The first loglog call fixes the log-log axes; later plt.plot calls reuse them.
plt.loglog(t, abs(qwe-ex)/ex, 'C0-', label='QWE')
plt.plot(t, abs(sin-ex)/ex, 'C1--', label='Sine Filter')
plt.plot(t, abs(ftl-ex)/ex, 'C2-.', label='FFTLog')
plt.plot(t, abs(fft-ex)/ex, 'C3:', label='FFT')
plt.legend(loc='best')
plt.show()
###############################################################################
# => The error is comparable in all cases. `FFT` is not too good at later
# times. This could be improved by computing lower frequencies. But because FFT
# needs regularly spaced data, our vector would soon explode (and you would
# need a lot of memory). In the current case we are already using 2^20 samples!
#
# Step response
# ~~~~~~~~~~~~~
#
# Step responses are almost impossible with `FFT`. We can either try to model
# late times with lots of low frequencies, or the step with lots of high
# frequencies. I do not use `FFT` in the step-response examples.
#
# Switch-on
# '''''''''
ex = ee_xx_step(res[1], aniso[1], rec[0], t)  # analytical switch-on reference
inparg['signal'] = 1  # signal 1 = switch-on
print('QWE')
qwe = empymod.dipole(**inparg, ft='qwe')
print('DLF (Sine)')
sin = empymod.dipole(**inparg, ft='dlf', ftarg={'dlf': 'key_81_CosSin_2009'})
print('FFTLog')
ftl = empymod.dipole(**inparg, ft='fftlog', ftarg={'q': -0.6})
###############################################################################
# Used number of frequencies:
#
# - QWE: 159 (0.000794328 - 63095.7 Hz)
# - Sine: 116 (5.33905E-06 - 52028 Hz)
# - FFTLog: 60 (0.000178575 - 141.847 Hz)
plt.figure()
plt.title(r'Switch-on response for HS-model, $r=$' +
          str(int(rec[0]/1000)) + ' km.')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (V/m)')
plt.semilogx(t, ex, 'k-', label='Analytical')
plt.semilogx(t, qwe, 'C0-', label='QWE')
plt.semilogx(t, sin, 'C1--', label='Sine Filter')
plt.semilogx(t, ftl, 'C2-.', label='FFTLog')
plt.legend(loc='best')
plt.show()
###############################################################################
plt.figure()
plt.title('Error')
plt.xlabel('Time (s)')
plt.ylabel('Relative error (-)')
plt.loglog(t, abs(qwe-ex)/ex, 'C0-', label='QWE')
plt.plot(t, abs(sin-ex)/ex, 'C1--', label='Sine Filter')
plt.plot(t, abs(ftl-ex)/ex, 'C2-.', label='FFTLog')
plt.legend(loc='best')
plt.show()
###############################################################################
# Switch-off
# ''''''''''
# For switch-off to work properly you need `empymod`-version bigger than 1.3.0!
# You can do it with previous releases too, but you will have to do the
# DC-computation and subtraction manually, as is done here for `ee_xx_step`.
# DC level: approximate t -> infinity by evaluating the step at one hour.
exDC = ee_xx_step(res[1], aniso[1], rec[0], 60*60)
ex = exDC - ee_xx_step(res[1], aniso[1], rec[0], t)
inparg['signal'] = -1  # signal -1 = switch-off
print('QWE')
qwe = empymod.dipole(**inparg, ft='qwe')
print('DLF (Cosine/Sine)')
sin = empymod.dipole(**inparg, ft='dlf', ftarg={'dlf': 'key_81_CosSin_2009'})
print('FFTLog')
ftl = empymod.dipole(**inparg, ft='fftlog', ftarg={'add_dec': [-5, 3]})
###############################################################################
plt.figure()
plt.title(r'Switch-off response for HS-model, $r=$' +
          str(int(rec[0]/1000)) + ' km.')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (V/m)')
plt.semilogx(t, ex, 'k-', label='Analytical')
plt.semilogx(t, qwe, 'C0-', label='QWE')
plt.semilogx(t, sin, 'C1--', label='Cosine/Sine Filter')
plt.semilogx(t, ftl, 'C2-.', label='FFTLog')
plt.legend(loc='best')
plt.show()
###############################################################################
plt.figure()
plt.title('Error')
plt.xlabel('Time (s)')
plt.ylabel('Relative error (-)')
plt.loglog(t, abs(qwe-ex)/ex, 'C0-', label='QWE')
plt.plot(t, abs(sin-ex)/ex, 'C1--', label='Sine Filter')
plt.plot(t, abs(ftl-ex)/ex, 'C2-.', label='FFTLog')
plt.legend(loc='best')
plt.show()
###############################################################################
# Example 2: Air-seawater-halfspace
# ---------------------------------
#
# In seawater the transformation is generally much easier, as we do not have
# the step or the impulse at zero time.
src = [0, 0, 950]  # Source 50 m above seabottom
rec = [6000, 0, 1000]  # Receivers in-line, at seabottom
res = [1e23, 1/3, 10]  # Resistivity: [air, water, half-space]
aniso = [1, 1, 2]  # Anisotropy: [air, water, half-space]
t = np.logspace(-2, 1, 301)  # Desired times (s)
# Collect parameters; interfaces at z=0 (air/sea) and z=1000 (seabottom)
inparg = {'src': src, 'rec': rec, 'depth': [0, 1000], 'freqtime': t,
          'res': res, 'aniso': aniso, 'ht': 'dlf', 'verb': 2}
###############################################################################
# Impulse response
# ~~~~~~~~~~~~~~~~
inparg['signal'] = 0  # signal 0 = impulse
print('QWE')
qwe = empymod.dipole(**inparg, ft='qwe', ftarg={'maxint': 500})
print('DLF (Sine)')
sin = empymod.dipole(**inparg, ft='dlf', ftarg={'dlf': 'key_81_CosSin_2009'})
print('FFTLog')
ftl = empymod.dipole(**inparg, ft='fftlog')
print('FFT')
fft = empymod.dipole(
    **inparg, ft='fft',
    ftarg={'dfreq': .001, 'nfreq': 2**15, 'ntot': 2**16, 'pts_per_dec': 10}
)
###############################################################################
# Used number of frequencies:
#
# - QWE: 167 (0.000794328 - 158489 Hz)
# - Sine: 116 (5.33905E-06 - 52028 Hz)
# - FFTLog: 60 (0.000178575 - 141.847 Hz)
# - FFT: 46 (0.001 - 32.768 Hz)
plt.figure()
plt.title(r'Impulse response for HS-model, $r=$' +
          str(int(rec[0]/1000)) + ' km.')
plt.xlabel('Time (s)')
plt.ylabel(r'Amplitude (V/m)')
# No analytical reference for this model; compare the four transforms only.
plt.semilogx(t, qwe, 'C0-', label='QWE')
plt.semilogx(t, sin, 'C1--', label='Sine Filter')
plt.semilogx(t, ftl, 'C2-.', label='FFTLog')
plt.semilogx(t, fft, 'C3:', label='FFT')
plt.legend(loc='best')
plt.show()
###############################################################################
# Step response
# ~~~~~~~~~~~~~
inparg['signal'] = 1  # signal 1 = step
print('QWE')
qwe = empymod.dipole(**inparg, ft='qwe', ftarg={'nquad': 31, 'maxint': 500})
print('DLF (Sine)')
sin = empymod.dipole(**inparg, ft='dlf', ftarg={'dlf': 'key_81_CosSin_2009'})
print('FFTLog')
ftl = empymod.dipole(**inparg, ft='fftlog', ftarg={'add_dec': [-2, 4]})
###############################################################################
# Used number of frequencies:
#
# - QWE: 173 (0.000398107 - 158489 Hz)
# - Sine: 116 (5.33905E-06 - 52028 Hz)
# - FFTLog: 90 (0.000178575 - 141847 Hz)
plt.figure()
plt.title(r'Step response for HS-model, $r=$' + str(int(rec[0]/1000)) + ' km.')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (V/m)')
plt.semilogx(t, qwe, 'C0-', label='QWE')
plt.semilogx(t, sin, 'C1--', label='Sine Filter')
plt.semilogx(t, ftl, 'C2-.', label='FFTLog')
plt.ylim([-.1e-12, 1.5*qwe.max()])
plt.legend(loc='best')
plt.show()
###############################################################################
empymod.Report()  # print version/environment report for reproducibility
|
prisae/empymod
|
examples/time_domain/step_and_impulse.py
|
Python
|
apache-2.0
| 13,182
|
[
"DIRAC"
] |
2c5e3c688f9b2bbc724533119fc3fc4149190be54ae55b097805b608c2d09a7b
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def offset_1388():
print "Loading datasets..."
pros_hex = h2o.import_file(tests.locate("smalldata/prostate/prostate.csv"))
pros_hex[1] = pros_hex[1].asfactor()
pros_hex[3] = pros_hex[3].asfactor()
pros_hex[4] = pros_hex[4].asfactor()
pros_hex[5] = pros_hex[5].asfactor()
pros_hex[8] = pros_hex[8].asfactor()
cars_hex = h2o.import_file(tests.locate("smalldata/junit/cars.csv"))
cars_hex[0] = cars_hex[0].asfactor()
cars_hex[2] = cars_hex[2].asfactor()
print "Running Binomial Comparison..."
glm_bin_h2o = h2o.glm(x=pros_hex[2:9], y=pros_hex[1], training_frame=pros_hex, family="binomial", standardize=False,
offset_column="AGE", Lambda=[0], max_iterations=100)
print "binomial"
print "R:"
print "deviance: {0}".format(1464.9565781185)
print "null deviance: {0}".format(2014.93087862689)
print "aic: {0}".format(1494.9565781185)
print "H2O:"
print "deviance {0}".format(glm_bin_h2o.residual_deviance())
print "null deviance {0}".format(glm_bin_h2o.null_deviance())
print "aic {0}".format(glm_bin_h2o.aic())
assert abs(1464.9565781185 - glm_bin_h2o.residual_deviance()) < 0.1
assert abs(2014.93087862689 - glm_bin_h2o.null_deviance()) < 0.1
assert abs(1494.9565781185 - glm_bin_h2o.aic()) < 0.1
print "Running Regression Comparisons..."
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="gaussian", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "gaussian"
print "R:"
print "deviance: {0}".format(4204.68399275449)
print "null deviance: {0}".format(16072.0955102041)
print "aic: {0}".format(2062.54330117177)
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(4204.68399275449 - glm_h2o.residual_deviance()) < 0.1
assert abs(16072.0955102041 - glm_h2o.null_deviance()) < 0.1
assert abs(2062.54330117177 - glm_h2o.aic()) < 0.1
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="poisson", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "poisson"
print "R:"
print "deviance: {0}".format(54039.1725227918)
print "null deviance: {0}".format(59381.5624028358)
print "aic: {0}".format("Inf")
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(54039.1725227918 - glm_h2o.residual_deviance()) < 0.1
assert abs(59381.5624028358 - glm_h2o.null_deviance()) < 0.1
assert abs(float('inf') - glm_h2o.aic()) < 0.1
if __name__ == "__main__":
    # Standalone entry point: run this check through the h2o test harness.
    tests.run_test(sys.argv, offset_1388)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_jira/pyunit_NOPASS_pubdev_1388_offset_comparisons.py
|
Python
|
apache-2.0
| 3,003
|
[
"Gaussian"
] |
713c5bb54c00e8442cf524a32c4d4ff3a0f61a867f90a1e10f040859019152e1
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines filters for Transmuter object.
"""
import abc
from collections import defaultdict
from monty.json import MSONable
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class AbstractStructureFilter(MSONable, metaclass=abc.ABCMeta):
    """
    Abstract base class defining the structure-filter API. Subclasses
    implement test(); structures for which test() returns True are kept
    during transmutation, while those returning False are dropped.
    """

    @abc.abstractmethod
    def test(self, structure):
        """
        Run the filter test on a single structure.

        Args:
            structure (Structure): Input structure to test

        Returns:
            bool: True to keep the structure in the Transmuter during
            filtering, False to remove it.
        """
        return
class ContainsSpecieFilter(AbstractStructureFilter):
    """
    Filter structures by the presence (or absence) of particular elements
    or species. By default the comparison is by atomic number.
    """

    def __init__(self, species, strict_compare=False, AND=True, exclude=False):
        """
        Args:
            species ([Species/Element]): list of species to look for
            AND: whether all species must be present to pass (or fail) filter.
            strict_compare: if true, compares objects by specie or element
                object if false, compares atomic number
            exclude: If true, returns false for any structures with the specie
                (excludes them from the Transmuter)
        """
        self._species = [get_el_sp(sp) for sp in species]
        self._strict = strict_compare
        self._AND = AND
        self._exclude = exclude

    def test(self, structure):
        """
        Run the species-presence test.

        Returns: True if the structure satisfies the species criterion
            (taking the exclude flag into account).
        """
        if self._strict:
            # Compare the actual Species/Element objects.
            wanted = set(self._species)
            present = set(structure.composition.elements)
        else:
            # Compare by atomic number only.
            wanted = {sp.Z for sp in self._species}
            present = {sp.Z for sp in structure.composition.elements}

        # AND: every requested species must appear; OR: at least one.
        if self._AND:
            matched = wanted <= present
        else:
            matched = bool(wanted & present)

        # A match keeps the structure unless we are excluding, and vice versa.
        if matched:
            return not self._exclude
        return self._exclude

    def __repr__(self):
        return "\n".join(
            [
                "ContainsSpecieFilter with parameters:",
                f"species = {self._species}",
                f"strict_compare = {self._strict}",
                f"AND = {self._AND}",
                f"exclude = {self._exclude}",
            ]
        )

    def as_dict(self):
        """
        Returns: MSONAble dict
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "init_args": {
                "species": [str(sp) for sp in self._species],
                "strict_compare": self._strict,
                "AND": self._AND,
                "exclude": self._exclude,
            },
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            Filter
        """
        return cls(**d["init_args"])
class SpecieProximityFilter(AbstractStructureFilter):
    """
    Rejects structures in which certain species sit closer together than a
    specified minimum distance.
    """

    def __init__(self, specie_and_min_dist_dict):
        """
        Args:
            specie_and_min_dist_dict (dict): A species string to float mapping. For
                example, {"Na+": 1} means that all Na+ ions must be at least 1
                Angstrom away from each other. Multiple species criteria can be
                applied. Note that the testing is done based on the actual object
                . If you have a structure with Element, you must use {"Na":1}
                instead to filter based on Element and not Species.
        """
        self.specie_and_min_dist = {get_el_sp(sp): dist for sp, dist in specie_and_min_dist_dict.items()}

    def test(self, structure):
        """
        Run the proximity test.

        Args:
            structure (Structure): Input structure to test

        Returns: True if no tracked species pair violates its minimum
            distance; False otherwise.
        """
        tracked = set(self.specie_and_min_dist.keys())
        for site in structure:
            # Tracked species occupying this site, if any.
            on_site = tracked.intersection(set(site.species.keys()))
            if not on_site:
                continue
            # One neighbor search with the largest relevant cutoff.
            cutoff = max(self.specie_and_min_dist[sp] for sp in on_site)
            neighbors = structure.get_neighbors(site, cutoff)
            for sp in on_site:
                limit = self.specie_and_min_dist[sp]
                for nbr_site, nbr_dist, *_ in neighbors:
                    if sp in nbr_site.species.keys() and nbr_dist < limit:
                        return False
        return True

    def as_dict(self):
        """
        Returns: MSONable dict
        """
        serialized = {str(sp): dist for sp, dist in self.specie_and_min_dist.items()}
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "init_args": {"specie_and_min_dist_dict": serialized},
        }

    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (dict): Dict representation

        Returns:
            Filter
        """
        return cls(**d["init_args"])
class RemoveDuplicatesFilter(AbstractStructureFilter):
    """
    This filter removes exact duplicate structures from the transmuter.
    """

    def __init__(
        self,
        structure_matcher=None,
        symprec=None,
    ):
        """
        Remove duplicate structures based on the structure matcher
        and symmetry (if symprec is given).

        Args:
            structure_matcher: Provides a structure matcher to be used for
                structure comparison. May be given as a StructureMatcher or
                its dict representation. Defaults to a StructureMatcher with
                an ElementComparator.
            symprec: The precision in the symmetry finder algorithm if None (
                default value), no symmetry check is performed and only the
                structure matcher is used. A recommended value is 1e-5.
        """
        self.symprec = symprec
        # hash -> list of unique structures seen so far with that hash
        self.structure_list = defaultdict(list)
        # Construct the default matcher per instance: a default evaluated at
        # definition time would be a single object shared by every filter.
        if structure_matcher is None:
            structure_matcher = StructureMatcher(comparator=ElementComparator())
        if isinstance(structure_matcher, dict):
            self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
        else:
            self.structure_matcher = structure_matcher

    def test(self, structure):
        """
        Args:
            structure (Structure): Input structure to test

        Returns: True if structure is not in list.
        """
        h = self.structure_matcher._comparator.get_hash(structure.composition)
        if not self.structure_list[h]:
            # First structure with this composition hash: keep it.
            self.structure_list[h].append(structure)
            return True

        def get_sg(s):
            # Space-group number, used as a cheap pre-check before fitting.
            finder = SpacegroupAnalyzer(s, symprec=self.symprec)
            return finder.get_space_group_number()

        for s in self.structure_list[h]:
            if self.symprec is None or get_sg(s) == get_sg(structure):
                if self.structure_matcher.fit(s, structure):
                    return False

        self.structure_list[h].append(structure)
        return True
class RemoveExistingFilter(AbstractStructureFilter):
    """
    This filter removes structures existing in a given list from the transmuter.
    """

    def __init__(
        self,
        existing_structures,
        structure_matcher=None,
        symprec=None,
    ):
        """
        Remove existing structures based on the structure matcher
        and symmetry (if symprec is given).

        Args:
            existing_structures: List of existing structures to compare with
            structure_matcher: Provides a structure matcher to be used for
                structure comparison. May be given as a StructureMatcher or
                its dict representation. Defaults to a StructureMatcher with
                an ElementComparator.
            symprec: The precision in the symmetry finder algorithm if None (
                default value), no symmetry check is performed and only the
                structure matcher is used. A recommended value is 1e-5.
        """
        self.symprec = symprec
        self.structure_list = []
        self.existing_structures = existing_structures
        # Construct the default matcher per instance rather than sharing a
        # single default object (evaluated once) across all filter instances.
        if structure_matcher is None:
            structure_matcher = StructureMatcher(comparator=ElementComparator())
        if isinstance(structure_matcher, dict):
            self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
        else:
            self.structure_matcher = structure_matcher

    def test(self, structure):
        """
        Method to execute the test.

        Args:
            structure (Structure): Input structure to test

        Returns: True if structure is not in existing list.
        """

        def get_sg(s):
            # Space-group number, used as a cheap pre-check before fitting.
            finder = SpacegroupAnalyzer(s, symprec=self.symprec)
            return finder.get_space_group_number()

        for s in self.existing_structures:
            if self.structure_matcher._comparator.get_hash(
                structure.composition
            ) == self.structure_matcher._comparator.get_hash(s.composition):
                if self.symprec is None or get_sg(s) == get_sg(structure):
                    if self.structure_matcher.fit(s, structure):
                        return False

        self.structure_list.append(structure)
        return True

    def as_dict(self):
        """
        Returns: MSONable dict
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "init_args": {"structure_matcher": self.structure_matcher.as_dict()},
        }
class ChargeBalanceFilter(AbstractStructureFilter):
    """
    Keep only charge-balanced structures. This is meaningful only for
    oxidation-state decorated structures; structures with purely elemental
    sites are automatically assumed to have net charge of 0.
    """

    def __init__(self):
        """No args required."""

    def test(self, structure):
        """
        Args:
            structure (Structure): Input structure to test

        Returns: True if structure is neutral.
        """
        return structure.charge == 0.0
class SpeciesMaxDistFilter(AbstractStructureFilter):
    """
    Removes structures in which two particular species are never found
    within a predefined max_dist of each other. For instance, when
    analyzing Li battery materials, you would expect each Li+ to be a
    nearest neighbor of a lower oxidation state transition metal for
    electrostatic reasons. This only works if the structure is oxidation
    state decorated, as structures with only elemental sites are
    automatically assumed to have net charge of 0.
    """

    def __init__(self, sp1, sp2, max_dist):
        """
        Args:
            sp1 (Species): First specie
            sp2 (Species): Second specie
            max_dist (float): Maximum distance between species.
        """
        self.sp1 = get_el_sp(sp1)
        self.sp2 = get_el_sp(sp2)
        self.max_dist = max_dist

    def test(self, structure):
        """
        Check that every sp1 site has at least one sp2 site within
        max_dist (taking periodic images into account).

        Args:
            structure (Structure): Input structure to test

        Returns:
            bool: True if each sp1 site lies within max_dist of some sp2
            site.
        """
        frac = structure.frac_coords
        idx1 = [i for i, site in enumerate(structure) if site.specie == self.sp1]
        idx2 = [i for i, site in enumerate(structure) if site.specie == self.sp2]
        # Pairwise PBC distances between every sp1 and every sp2 site.
        all_dists = structure.lattice.get_all_distances(frac[idx1, :], frac[idx2, :])
        # Each row corresponds to one sp1 site; it passes when any sp2
        # site falls inside the cutoff.
        return all(any(row) for row in all_dists < self.max_dist)
|
materialsproject/pymatgen
|
pymatgen/alchemy/filters.py
|
Python
|
mit
| 12,546
|
[
"pymatgen"
] |
6ba7e25d26e8fcf336d78e902760dc61dabaaee6e1d8dba194af2b91b0523168
|
""" This is a test of the creation of the pilot wrapper
"""
# pylint: disable=protected-access, invalid-name, no-self-use
import os
import base64
import bz2
from DIRAC.WorkloadManagementSystem.Utilities.PilotWrapper import pilotWrapperScript
def test_scriptEmpty():
    """ test script creation with all defaults
    """
    script = pilotWrapperScript()
    # the default wrapper invokes the pilot with no extra options ...
    assert 'cmd = "python dirac-pilot.py "' in script
    # ... and defines no environment variables
    assert 'os.environ["someName"]="someValue"' not in script
def test_scriptoptions():
    """ test script creation with embedded pilot files and options
    """
    filesDict = {'dirac-install.py': 'someContentOfDiracInstall',
                 'someOther.py': 'someOtherContent'}
    script = pilotWrapperScript(
        pilotFilesCompressedEncodedDict=filesDict,
        pilotOptions="-c 123 --foo bar")
    # the wrapper must unpack the supplied pilot files ...
    assert "with open('dirac-install.py', 'w') as fd:" in script
    # ... and still define no environment variables
    assert 'os.environ["someName"]="someValue"' not in script
def test_scriptReal():
    """ test script creation with the real pilot files

    Reads the actual DIRAC pilot files from the working copy, compresses
    and base64-encodes them, and checks the wrapper embeds them.
    """

    def _readCompressEncode(relPath):
        """Read a file, bz2-compress and base64-encode its content."""
        # Read in binary mode: bz2.compress() requires bytes on Python 3
        # (passing a text-mode str only worked on Python 2).
        with open(os.path.join(os.getcwd(), relPath), "rb") as fd:
            return base64.b64encode(bz2.compress(fd.read(), 9))

    diracInstallEncoded = _readCompressEncode(
        'Core/scripts/dirac-install.py')
    diracPilotEncoded = _readCompressEncode(
        'WorkloadManagementSystem/PilotAgent/dirac-pilot.py')
    diracPilotToolsEncoded = _readCompressEncode(
        'WorkloadManagementSystem/PilotAgent/pilotTools.py')
    diracPilotCommandsEncoded = _readCompressEncode(
        'WorkloadManagementSystem/PilotAgent/pilotCommands.py')

    res = pilotWrapperScript(
        pilotFilesCompressedEncodedDict={'dirac-install.py': diracInstallEncoded,
                                         'dirac-pilot.py': diracPilotEncoded,
                                         'pilotTools.py': diracPilotToolsEncoded,
                                         'pilotCommands.py': diracPilotCommandsEncoded},
        pilotOptions="-c 123 --foo bar")
    # all embedded files must be unpacked by the wrapper
    assert "with open('dirac-pilot.py', 'w') as fd:" in res
    assert "with open('dirac-install.py', 'w') as fd:" in res
    # no environment variables were requested
    assert 'os.environ["someName"]="someValue"' not in res
def test_scriptWithEnvVars():
    """ test script creation with environment variables
    """
    envDict = {'someName': 'someValue',
               'someMore': 'oneMore'}
    script = pilotWrapperScript(
        pilotFilesCompressedEncodedDict={'dirac-install.py': 'someContentOfDiracInstall',
                                         'someOther.py': 'someOtherContent'},
        pilotOptions="-c 123 --foo bar",
        envVariables=envDict)
    # the requested environment variables must be exported by the wrapper
    assert 'os.environ["someName"]="someValue"' in script
def test_scriptPilot3():
    """ test script creation for a pilot3-style download from a location
    """
    script = pilotWrapperScript(
        pilotFilesCompressedEncodedDict={'proxy': 'thisIsSomeProxy'},
        pilotOptions="-c 123 --foo bar",
        envVariables={'someName': 'someValue',
                      'someMore': 'oneMore'},
        location='lhcb-portal.cern.ch')
    # environment variables are exported ...
    assert 'os.environ["someName"]="someValue"' in script
    # ... and the download location appears in the generated script
    assert 'lhcb-portal.cern.ch' in script
|
andresailer/DIRAC
|
WorkloadManagementSystem/Utilities/test/Test_PilotWrapper.py
|
Python
|
gpl-3.0
| 3,291
|
[
"DIRAC"
] |
fef8a2ae2cf2f0348af6ff41cb4b018c98e5b0cb82de1b917f3a2f731373036f
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import sys
from spack import *
from spack.operating_systems.mac_os import macos_version
# Trilinos is complicated to build, as an inspiration a couple of links to
# other repositories which build it:
# https://github.com/hpcugent/easybuild-easyblocks/blob/master/easybuild/easyblocks/t/trilinos.py#L111
# https://github.com/koecher/candi/blob/master/deal.II-toolchain/packages/trilinos.package
# https://gitlab.com/configurations/cluster-config/blob/master/trilinos.sh
# https://github.com/Homebrew/homebrew-science/blob/master/trilinos.rb and some
# relevant documentation/examples:
# https://github.com/trilinos/Trilinos/issues/175
class Trilinos(CMakePackage):
    """The Trilinos Project is an effort to develop algorithms and enabling
    technologies within an object-oriented software framework for the solution
    of large-scale, complex multi-physics engineering and scientific problems.
    A unique design feature of Trilinos is its focus on packages.
    """

    homepage = "https://trilinos.org/"
    url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-12-12-1.tar.gz"
    git = "https://github.com/trilinos/Trilinos.git"

    maintainers = ['aprokop']

    # ###################### Versions ##########################

    version('xsdk-0.2.0', tag='xsdk-0.2.0')
    version('develop', branch='develop')
    version('master', branch='master')
    version('12.12.1', 'ecd4606fa332212433c98bf950a69cc7')
    version('12.10.1', '667333dbd7c0f031d47d7c5511fd0810')
    version('12.8.1', '9f37f683ee2b427b5540db8a20ed6b15')
    version('12.6.4', 'e11fff717d0e4565779f75a47feecbb2')
    version('12.6.3', '9ce30b6ab956bfc41730479a9ef05d05')
    version('12.6.2', '0237d32feedd979a6fbb139aa5df8500')
    version('12.6.1', '14ab8f7e74b66c33d5731cbf68b8cb82')
    version('12.4.2', '98880f414752220e60feaeb36b023f60')
    version('12.2.1', '8b344a9e9e533126dfd96db58ce69dde')
    version('12.0.1', 'b8263f7037f7c688091d0da19d169709')
    version('11.14.3', 'ff31ad49d633ab28369c228784055c85')
    version('11.14.2', '1fdf15a5b4494f832b414f9c447ab685')
    version('11.14.1', '478d0438d935294a7c94347c94a7c8cb')

    # ###################### Variants ##########################

    # Other
    # not everyone has py-numpy activated, keep it disabled by default to avoid
    # configure errors
    variant('python', default=False,
            description='Build python wrappers')

    # Build options
    variant('fortran', default=True,
            description='Compile with Fortran support')
    variant('instantiate', default=True,
            description='Compile with explicit instantiation')
    variant('instantiate_cmplx', default=False,
            description='Compile with explicit instantiation for complex')
    variant('openmp', default=False,
            description='Enable OpenMP')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('xsdkflags', default=False,
            description='Compile using the default xSDK configuration')

    # TPLs (alphabet order)
    variant('boost', default=True,
            description='Compile with Boost')
    variant('cgns', default=False,
            description='Enable CGNS')
    variant('exodus', default=True,
            description='Compile with Exodus from SEACAS')
    variant('gtest', default=True,
            description='Compile with Gtest')
    variant('hdf5', default=True,
            description='Compile with HDF5')
    variant('hypre', default=True,
            description='Compile with Hypre preconditioner')
    variant('metis', default=True,
            description='Compile with METIS and ParMETIS')
    variant('mumps', default=True,
            description='Compile with support for MUMPS solvers')
    variant('pnetcdf', default=False,
            description='Compile with parallel-netcdf')
    variant('suite-sparse', default=True,
            description='Compile with SuiteSparse solvers')
    variant('superlu-dist', default=True,
            description='Compile with SuperluDist solvers')
    variant('superlu', default=False,
            description='Compile with SuperLU solvers')
    variant('x11', default=False,
            description='Compile with X11')
    variant('zlib', default=False,
            description='Compile with zlib')

    # Package options (alphabet order)
    variant('alloptpkgs', default=False,
            description='Compile with all optional packages')
    variant('amesos', default=True,
            description='Compile with Amesos')
    variant('amesos2', default=True,
            description='Compile with Amesos2')
    variant('anasazi', default=True,
            description='Compile with Anasazi')
    variant('aztec', default=True,
            description='Compile with Aztec')
    variant('belos', default=True,
            description='Compile with Belos')
    variant('epetra', default=True,
            description='Compile with Epetra')
    variant('epetraext', default=True,
            description='Compile with EpetraExt')
    variant('ifpack', default=True,
            description='Compile with Ifpack')
    variant('ifpack2', default=True,
            description='Compile with Ifpack2')
    variant('intrepid', default=False,
            description='Enable Intrepid')
    variant('intrepid2', default=False,
            description='Enable Intrepid2')
    variant('isorropia', default=False,
            description='Compile with Isorropia')
    variant('kokkos', default=True,
            description='Compile with Kokkos')
    variant('ml', default=True,
            description='Compile with ML')
    variant('minitensor', default=False,
            description='Compile with MiniTensor')
    variant('muelu', default=True,
            description='Compile with Muelu')
    variant('nox', default=False,
            description='Compile with NOX')
    variant('piro', default=False,
            description='Compile with Piro')
    variant('phalanx', default=False,
            description='Compile with Phalanx')
    variant('rol', default=False,
            description='Compile with ROL')
    variant('rythmos', default=False,
            description='Compile with Rythmos')
    variant('sacado', default=True,
            description='Compile with Sacado')
    variant('stk', default=False,
            description='Compile with STK')
    variant('shards', default=False,
            description='Compile with Shards')
    variant('teko', default=False,
            description='Compile with Teko')
    variant('tempus', default=False,
            description='Compile with Tempus')
    variant('teuchos', default=True,
            description='Compile with Teuchos')
    variant('tpetra', default=True,
            description='Compile with Tpetra')
    variant('zoltan', default=True,
            description='Compile with Zoltan')
    variant('zoltan2', default=True,
            description='Compile with Zoltan2')

    # External package options
    variant('dtk', default=False,
            description='Enable DataTransferKit')
    variant('fortrilinos', default=False,
            description='Enable ForTrilinos')

    resource(name='dtk',
             git='https://github.com/ornl-cees/DataTransferKit.git',
             tag='master',
             placement='DataTransferKit',
             when='+dtk')
    resource(name='fortrilinos',
             git='https://github.com/trilinos/ForTrilinos.git',
             tag='develop',
             placement='packages/ForTrilinos',
             when='+fortrilinos')

    conflicts('+amesos2', when='~teuchos')
    conflicts('+amesos2', when='~tpetra')
    conflicts('+amesos', when='~epetra')
    conflicts('+amesos', when='~teuchos')
    conflicts('+anasazi', when='~teuchos')
    conflicts('+belos', when='~teuchos')
    conflicts('+epetraext', when='~epetra')
    conflicts('+epetraext', when='~teuchos')
    conflicts('+ifpack2', when='~belos')
    conflicts('+ifpack2', when='~teuchos')
    conflicts('+ifpack2', when='~tpetra')
    conflicts('+ifpack', when='~epetra')
    conflicts('+ifpack', when='~teuchos')
    conflicts('+intrepid2', when='~kokkos')
    conflicts('+intrepid2', when='~shards')
    conflicts('+intrepid2', when='~teuchos')
    conflicts('+intrepid', when='~sacado')
    conflicts('+intrepid', when='~shards')
    conflicts('+intrepid', when='~teuchos')
    conflicts('+isorropia', when='~epetra')
    conflicts('+isorropia', when='~epetraext')
    conflicts('+isorropia', when='~teuchos')
    conflicts('+isorropia', when='~zoltan')
    conflicts('+muelu', when='~teuchos')
    conflicts('+muelu', when='~xpetra')
    conflicts('+nox', when='~teuchos')
    conflicts('+phalanx', when='~kokkos')
    conflicts('+phalanx', when='~sacado')
    conflicts('+phalanx', when='~teuchos')
    conflicts('+piro', when='~teuchos')
    conflicts('+rol', when='~teuchos')
    conflicts('+rythmos', when='~teuchos')
    conflicts('+teko', when='~amesos')
    conflicts('+teko', when='~anasazi')
    conflicts('+teko', when='~aztec')
    conflicts('+teko', when='~ifpack')
    conflicts('+teko', when='~ml')
    conflicts('+teko', when='~teuchos')
    conflicts('+teko', when='~tpetra')
    conflicts('+tempus', when='~nox')
    conflicts('+tempus', when='~teuchos')
    conflicts('+tpetra', when='~kokkos')
    conflicts('+tpetra', when='~teuchos')
    conflicts('+zoltan2', when='~teuchos')
    conflicts('+zoltan2', when='~tpetra')
    conflicts('+zoltan2', when='~xpetra')
    conflicts('+zoltan2', when='~zoltan')

    conflicts('+dtk', when='~intrepid2')
    conflicts('+dtk', when='~kokkos')
    conflicts('+dtk', when='~teuchos')
    conflicts('+dtk', when='~tpetra')
    conflicts('+fortrilinos', when='~fortran')
    conflicts('+fortrilinos', when='@:99')
    conflicts('+fortrilinos', when='@master')
    # Can only use one type of SuperLU
    conflicts('+superlu-dist', when='+superlu')
    # For Trilinos v11 we need to force SuperLUDist=OFF, since only the
    # deprecated SuperLUDist v3.3 together with an Amesos patch is working.
    conflicts('+superlu-dist', when='@11.4.1:11.14.3')
    # PnetCDF was only added after v12.10.1
    conflicts('+pnetcdf', when='@0:12.10.1')

    # ###################### Dependencies ##########################

    # Everything should be compiled position independent (-fpic)
    depends_on('blas')
    depends_on('lapack')
    depends_on('boost', when='+boost')
    depends_on('boost', when='+dtk')
    depends_on('matio')
    depends_on('glm')
    depends_on('metis@5:', when='+metis')
    depends_on('suite-sparse', when='+suite-sparse')
    depends_on('zlib', when="+zlib")

    # MPI related dependencies
    depends_on('mpi')
    depends_on('netcdf+mpi', when="~pnetcdf")
    depends_on('netcdf+mpi+parallel-netcdf', when="+pnetcdf@master,12.12.1:")
    depends_on('parallel-netcdf', when="+pnetcdf@master,12.12.1:")
    depends_on('parmetis', when='+metis')
    depends_on('cgns', when='+cgns')
    # Trilinos' Tribits config system is limited which makes it very tricky to
    # link Amesos with static MUMPS, see
    # https://trilinos.org/docs/dev/packages/amesos2/doc/html/classAmesos2_1_1MUMPS.html
    # One could work it out by getting linking flags from mpif90 --showme:link
    # (or alike) and adding results to -DTrilinos_EXTRA_LINK_FLAGS together
    # with Blas and Lapack and ScaLAPACK and Blacs and -lgfortran and it may
    # work at the end. But let's avoid all this by simply using shared libs
    depends_on('mumps@5.0:+mpi+shared', when='+mumps')
    depends_on('scalapack', when='+mumps')
    depends_on('superlu-dist', when='+superlu-dist')
    depends_on('superlu-dist@:4.3', when='@:12.6.1+superlu-dist')
    depends_on('superlu-dist@4.4:5.3', when='@12.6.2:12.12.1+superlu-dist')
    depends_on('superlu-dist@develop', when='@develop+superlu-dist')
    depends_on('superlu-dist@xsdk-0.2.0', when='@xsdk-0.2.0+superlu-dist')
    depends_on('superlu+pic@4.3', when='+superlu')
    # Trilinos can not be built against 64bit int hypre
    depends_on('hypre~internal-superlu~int64', when='+hypre')
    depends_on('hypre@xsdk-0.2.0~internal-superlu', when='@xsdk-0.2.0+hypre')
    depends_on('hypre@develop~internal-superlu', when='@develop+hypre')
    # FIXME: concretizer bug? 'hl' req by netcdf is affecting this code.
    depends_on('hdf5+hl+mpi', when='+hdf5')
    depends_on('python', when='+python')
    depends_on('py-numpy', when='+python', type=('build', 'run'))
    depends_on('swig', when='+python')

    patch('umfpack_from_suitesparse.patch', when='@11.14.1:12.8.1')
    patch('xlf_seacas.patch', when='@12.10.1:12.12.1 %xl')
    patch('xlf_seacas.patch', when='@12.10.1:12.12.1 %xl_r')
    patch('xlf_seacas.patch', when='@12.10.1:12.12.1 %clang')
    patch('xlf_tpetra.patch', when='@12.12.1:%xl')
    patch('xlf_tpetra.patch', when='@12.12.1:%xl_r')
    patch('xlf_tpetra.patch', when='@12.12.1:%clang')

    def url_for_version(self, version):
        """Return the download URL for a given version (release tarballs use
        dashed version numbers)."""
        url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-{0}.tar.gz"
        return url.format(version.dashed)

    def cmake_args(self):
        """Assemble the full list of CMake options from the spec's variants."""
        spec = self.spec
        cxx_flags = []
        options = []

        # #################### Base Settings #######################

        mpi_bin = spec['mpi'].prefix.bin
        options.extend([
            '-DTrilinos_VERBOSE_CONFIGURE:BOOL=OFF',
            '-DTrilinos_ENABLE_TESTS:BOOL=OFF',
            '-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
            '-DTrilinos_ENABLE_CXX11:BOOL=ON',
            '-DBUILD_SHARED_LIBS:BOOL=%s' % (
                'ON' if '+shared' in spec else 'OFF'),

            # The following can cause problems on systems that don't have
            # static libraries available for things like dl and pthreads
            # for example when trying to build static libs
            # '-DTPL_FIND_SHARED_LIBS:BOOL=%s' % (
            #     'ON' if '+shared' in spec else 'OFF'),
            # '-DTrilinos_LINK_SEARCH_START_STATIC:BOOL=%s' % (
            #     'OFF' if '+shared' in spec else 'ON'),

            # Force Trilinos to use the MPI wrappers instead of raw compilers
            # this is needed on Apple systems that require full resolution of
            # all symbols when linking shared libraries
            '-DTPL_ENABLE_MPI:BOOL=ON',
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
            '-DMPI_BASE_DIR:PATH=%s' % spec['mpi'].prefix
        ])

        # ################## Trilinos Packages #####################

        options.extend([
            '-DTrilinos_ENABLE_ALL_OPTIONAL_PACKAGES:BOOL=%s' % (
                'ON' if '+alloptpkgs' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Amesos:BOOL=%s' % (
                'ON' if '+amesos' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Amesos2:BOOL=%s' % (
                'ON' if '+amesos2' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Anasazi:BOOL=%s' % (
                'ON' if '+anasazi' in spec else 'OFF'),
            '-DTrilinos_ENABLE_AztecOO:BOOL=%s' % (
                'ON' if '+aztec' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Belos:BOOL=%s' % (
                'ON' if '+belos' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Epetra:BOOL=%s' % (
                'ON' if '+epetra' in spec else 'OFF'),
            '-DTrilinos_ENABLE_EpetraExt:BOOL=%s' % (
                'ON' if '+epetraext' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Ifpack:BOOL=%s' % (
                'ON' if '+ifpack' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Ifpack2:BOOL=%s' % (
                'ON' if '+ifpack2' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Intrepid=%s' % (
                'ON' if '+intrepid' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Intrepid2=%s' % (
                'ON' if '+intrepid2' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Isorropia=%s' % (
                'ON' if '+isorropia' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Kokkos:BOOL=%s' % (
                'ON' if '+kokkos' in spec else 'OFF'),
            '-DTrilinos_ENABLE_MiniTensor=%s' % (
                'ON' if '+minitensor' in spec else 'OFF'),
            '-DTrilinos_ENABLE_ML:BOOL=%s' % (
                'ON' if '+ml' in spec else 'OFF'),
            '-DTrilinos_ENABLE_MueLu:BOOL=%s' % (
                'ON' if '+muelu' in spec else 'OFF'),
            '-DTrilinos_ENABLE_NOX:BOOL=%s' % (
                'ON' if '+nox' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Piro:BOOL=%s' % (
                'ON' if '+piro' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Phalanx=%s' % (
                'ON' if '+phalanx' in spec else 'OFF'),
            '-DTrilinos_ENABLE_PyTrilinos:BOOL=%s' % (
                'ON' if '+python' in spec else 'OFF'),
            '-DTrilinos_ENABLE_ROL:BOOL=%s' % (
                'ON' if '+rol' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Rythmos=%s' % (
                'ON' if '+rythmos' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Sacado:BOOL=%s' % (
                'ON' if '+sacado' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Shards=%s' % (
                'ON' if '+shards' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Teko=%s' % (
                'ON' if '+teko' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Tempus=%s' % (
                'ON' if '+tempus' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Teuchos:BOOL=%s' % (
                'ON' if '+teuchos' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Tpetra:BOOL=%s' % (
                'ON' if '+tpetra' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Zoltan:BOOL=%s' % (
                'ON' if '+zoltan' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Zoltan2:BOOL=%s' % (
                'ON' if '+zoltan2' in spec else 'OFF'),
        ])

        if '+xsdkflags' in spec:
            options.extend(['-DUSE_XSDK_DEFAULTS=YES'])

        if '+stk' in spec:
            # Currently these are fairly specific to the Nalu package
            # They can likely change when necessary in the future
            options.extend([
                '-DTrilinos_ENABLE_STKMesh:BOOL=ON',
                '-DTrilinos_ENABLE_STKSimd:BOOL=ON',
                '-DTrilinos_ENABLE_STKIO:BOOL=ON',
                '-DTrilinos_ENABLE_STKTransfer:BOOL=ON',
                '-DTrilinos_ENABLE_STKSearch:BOOL=ON',
                '-DTrilinos_ENABLE_STKUtil:BOOL=ON',
                '-DTrilinos_ENABLE_STKTopology:BOOL=ON',
                '-DTrilinos_ENABLE_STKUnit_tests:BOOL=ON',
                '-DTrilinos_ENABLE_STKUnit_test_utils:BOOL=ON',
                '-DTrilinos_ENABLE_STKClassic:BOOL=OFF',
                '-DTrilinos_ENABLE_STKExprEval:BOOL=ON'
            ])

        if '+dtk' in spec:
            options.extend([
                '-DTrilinos_EXTRA_REPOSITORIES:STRING=DataTransferKit',
                '-DTpetra_INST_INT_UNSIGNED_LONG:BOOL=ON',
                '-DTrilinos_ENABLE_DataTransferKit:BOOL=ON'
            ])

        if '+exodus' in spec:
            # Currently these are fairly specific to the Nalu package
            # They can likely change when necessary in the future
            options.extend([
                '-DTrilinos_ENABLE_SEACAS:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASExodus:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASEpu:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASExodiff:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASNemspread:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASNemslice:BOOL=ON',
                '-DTrilinos_ENABLE_SEACASIoss:BOOL=ON'
            ])
        else:
            options.extend([
                '-DTrilinos_ENABLE_SEACAS:BOOL=OFF',
                '-DTrilinos_ENABLE_SEACASExodus:BOOL=OFF'
            ])

        # ######################### TPLs #############################

        blas = spec['blas'].libs
        lapack = spec['lapack'].libs
        # Note: -DXYZ_LIBRARY_NAMES= needs semicolon separated list of names
        options.extend([
            '-DTPL_ENABLE_BLAS=ON',
            '-DBLAS_LIBRARY_NAMES=%s' % ';'.join(blas.names),
            '-DBLAS_LIBRARY_DIRS=%s' % ';'.join(blas.directories),
            '-DTPL_ENABLE_LAPACK=ON',
            '-DLAPACK_LIBRARY_NAMES=%s' % ';'.join(lapack.names),
            '-DLAPACK_LIBRARY_DIRS=%s' % ';'.join(lapack.directories),
            '-DTPL_ENABLE_Netcdf:BOOL=ON',
            '-DNetCDF_ROOT:PATH=%s' % spec['netcdf'].prefix,
            '-DTPL_ENABLE_X11:BOOL=%s' % (
                'ON' if '+x11' in spec else 'OFF'),
            '-DTrilinos_ENABLE_Gtest:BOOL=%s' % (
                'ON' if '+gtest' in spec else 'OFF'),
        ])

        if '+hypre' in spec:
            options.extend([
                '-DTPL_ENABLE_HYPRE:BOOL=ON',
                '-DHYPRE_INCLUDE_DIRS:PATH=%s' % spec['hypre'].prefix.include,
                '-DHYPRE_LIBRARY_DIRS:PATH=%s' % spec['hypre'].prefix.lib
            ])

        if '+boost' in spec:
            options.extend([
                '-DTPL_ENABLE_Boost:BOOL=ON',
                '-DBoost_INCLUDE_DIRS:PATH=%s' % spec['boost'].prefix.include,
                '-DBoost_LIBRARY_DIRS:PATH=%s' % spec['boost'].prefix.lib
            ])
        else:
            options.extend(['-DTPL_ENABLE_Boost:BOOL=OFF'])

        if '+hdf5' in spec:
            options.extend([
                '-DTPL_ENABLE_HDF5:BOOL=ON',
                '-DHDF5_INCLUDE_DIRS:PATH=%s' % spec['hdf5'].prefix.include,
                '-DHDF5_LIBRARY_DIRS:PATH=%s' % spec['hdf5'].prefix.lib
            ])
        else:
            options.extend(['-DTPL_ENABLE_HDF5:BOOL=OFF'])

        if '+suite-sparse' in spec:
            options.extend([
                # FIXME: Trilinos seems to be looking for static libs only,
                # patch CMake TPL file?
                '-DTPL_ENABLE_Cholmod:BOOL=OFF',
                # '-DTPL_ENABLE_Cholmod:BOOL=ON',
                # '-DCholmod_LIBRARY_DIRS:PATH=%s' % (
                #     spec['suite-sparse'].prefix.lib,
                # '-DCholmod_INCLUDE_DIRS:PATH=%s' % (
                #     spec['suite-sparse'].prefix.include,
                '-DTPL_ENABLE_UMFPACK:BOOL=ON',
                '-DUMFPACK_LIBRARY_DIRS:PATH=%s' % (
                    spec['suite-sparse'].prefix.lib),
                '-DUMFPACK_INCLUDE_DIRS:PATH=%s' % (
                    spec['suite-sparse'].prefix.include),
                '-DUMFPACK_LIBRARY_NAMES=umfpack;amd;colamd;cholmod;' +
                'suitesparseconfig'
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_Cholmod:BOOL=OFF',
                '-DTPL_ENABLE_UMFPACK:BOOL=OFF',
            ])

        if '+metis' in spec:
            options.extend([
                '-DTPL_ENABLE_METIS:BOOL=ON',
                '-DMETIS_LIBRARY_DIRS=%s' % spec['metis'].prefix.lib,
                '-DMETIS_LIBRARY_NAMES=metis',
                '-DTPL_METIS_INCLUDE_DIRS=%s' % spec['metis'].prefix.include,
                '-DTPL_ENABLE_ParMETIS:BOOL=ON',
                '-DParMETIS_LIBRARY_DIRS=%s;%s' % (
                    spec['parmetis'].prefix.lib, spec['metis'].prefix.lib),
                '-DParMETIS_LIBRARY_NAMES=parmetis;metis',
                '-DTPL_ParMETIS_INCLUDE_DIRS=%s;%s' % (
                    spec['parmetis'].prefix.include,
                    spec['metis'].prefix.include)
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_METIS:BOOL=OFF',
                '-DTPL_ENABLE_ParMETIS:BOOL=OFF',
            ])

        if '+mumps' in spec:
            scalapack = spec['scalapack'].libs
            options.extend([
                '-DTPL_ENABLE_MUMPS:BOOL=ON',
                '-DMUMPS_LIBRARY_DIRS=%s' % spec['mumps'].prefix.lib,
                # order is important!
                '-DMUMPS_LIBRARY_NAMES=dmumps;mumps_common;pord',
                '-DTPL_ENABLE_SCALAPACK:BOOL=ON',
                '-DSCALAPACK_LIBRARY_NAMES=%s' % ';'.join(scalapack.names),
                '-DSCALAPACK_LIBRARY_DIRS=%s' % ';'.join(scalapack.directories)
            ])
            # see
            # https://github.com/trilinos/Trilinos/blob/master/packages/amesos/README-MUMPS
            cxx_flags.extend([
                '-DMUMPS_5_0'
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_MUMPS:BOOL=OFF',
                '-DTPL_ENABLE_SCALAPACK:BOOL=OFF',
            ])

        if '+superlu-dist' in spec:
            # Amesos, conflicting types of double and complex SLU_D
            # see
            # https://trilinos.org/pipermail/trilinos-users/2015-March/004731.html
            # and
            # https://trilinos.org/pipermail/trilinos-users/2015-March/004802.html
            options.extend([
                '-DTeuchos_ENABLE_COMPLEX:BOOL=OFF',
                '-DKokkosTSQR_ENABLE_Complex:BOOL=OFF'
            ])
            options.extend([
                '-DTPL_ENABLE_SuperLUDist:BOOL=ON',
                '-DSuperLUDist_LIBRARY_DIRS=%s' %
                spec['superlu-dist'].prefix.lib,
                '-DSuperLUDist_INCLUDE_DIRS=%s' %
                spec['superlu-dist'].prefix.include
            ])
            if spec.satisfies('^superlu-dist@4.0:'):
                options.extend([
                    '-DHAVE_SUPERLUDIST_LUSTRUCTINIT_2ARG:BOOL=ON'
                ])
        else:
            options.extend([
                '-DTPL_ENABLE_SuperLUDist:BOOL=OFF',
            ])

        if '+superlu' in spec:
            options.extend([
                '-DTPL_ENABLE_SuperLU:BOOL=ON',
                '-DSuperLU_LIBRARY_DIRS=%s' %
                spec['superlu'].prefix.lib,
                '-DSuperLU_INCLUDE_DIRS=%s' %
                spec['superlu'].prefix.include
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_SuperLU:BOOL=OFF',
            ])

        if '+pnetcdf' in spec:
            options.extend([
                '-DTPL_ENABLE_Pnetcdf:BOOL=ON',
                '-DTPL_Netcdf_Enables_Netcdf4:BOOL=ON',
                '-DTPL_Netcdf_PARALLEL:BOOL=ON',
                '-DPNetCDF_ROOT:PATH=%s' % spec['parallel-netcdf'].prefix
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_Pnetcdf:BOOL=OFF'
            ])

        if '+zlib' in spec:
            options.extend([
                '-DTPL_ENABLE_Zlib:BOOL=ON',
                '-DZlib_ROOT:PATH=%s' % spec['zlib'].prefix,
            ])
        else:
            options.extend([
                '-DTPL_ENABLE_Zlib:BOOL=OFF'
            ])

        if '+cgns' in spec:
            options.extend([
                '-DTPL_ENABLE_CGNS:BOOL=ON',
                '-DCGNS_INCLUDE_DIRS:PATH=%s' % spec['cgns'].prefix.include,
                '-DCGNS_LIBRARY_DIRS:PATH=%s' % spec['cgns'].prefix.lib
            ])
        else:
            options.extend([
                # fixed typo: was '-DTPL_ENABLE_GGNS', which left CGNS enabled
                '-DTPL_ENABLE_CGNS:BOOL=OFF'
            ])

        # ################# Miscellaneous Stuff ######################

        # OpenMP
        if '+openmp' in spec:
            options.extend([
                '-DTrilinos_ENABLE_OpenMP:BOOL=ON',
                '-DKokkos_ENABLE_OpenMP:BOOL=ON'
            ])
            if '+tpetra' in spec:
                options.extend([
                    '-DTpetra_INST_OPENMP:BOOL=ON'
                ])

        # Fortran lib
        if '+fortran' in spec:
            if spec.satisfies('%gcc') or spec.satisfies('%clang'):
                libgfortran = os.path.dirname(os.popen(
                    '%s --print-file-name libgfortran.a' %
                    join_path(mpi_bin, 'mpif90')).read())
                options.extend([
                    '-DTrilinos_EXTRA_LINK_FLAGS:STRING=-L%s/ -lgfortran' % (
                        libgfortran),
                    '-DTrilinos_ENABLE_Fortran=ON'
                ])

        # Explicit instantiation
        if '+instantiate' in spec:
            options.extend([
                '-DTrilinos_ENABLE_EXPLICIT_INSTANTIATION:BOOL=ON'
            ])
            if '+tpetra' in spec:
                options.extend([
                    '-DTpetra_INST_DOUBLE:BOOL=ON',
                    # fixed missing comma: the two options below used to be
                    # silently concatenated into one invalid CMake argument
                    '-DTpetra_INST_INT_LONG:BOOL=ON',
                    '-DTpetra_INST_COMPLEX_DOUBLE=%s' % (
                        'ON' if '+instantiate_cmplx' in spec else 'OFF'
                    )
                ])

        # disable due to compiler / config errors:
        if spec.satisfies('%xl') or spec.satisfies('%xl_r'):
            options.extend([
                '-DTrilinos_ENABLE_Pamgen:BOOL=OFF',
                '-DTrilinos_ENABLE_Stokhos:BOOL=OFF'
            ])

        if sys.platform == 'darwin':
            options.extend([
                '-DTrilinos_ENABLE_FEI=OFF'
            ])

        if sys.platform == 'darwin' and macos_version() >= Version('10.12'):
            # use @rpath on Sierra due to limit of dynamic loader
            options.append('-DCMAKE_MACOSX_RPATH=ON')
        else:
            options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s' %
                           self.prefix.lib)

        if spec.satisfies('%intel') and spec.satisfies('@12.6.2'):
            # Panzer uses some std:chrono that is not recognized by Intel
            # Don't know which (maybe all) Trilinos versions this applies to
            # Don't know which (maybe all) Intel versions this applies to
            options.extend([
                '-DTrilinos_ENABLE_Panzer:BOOL=OFF'
            ])

        # collect CXX flags:
        options.extend([
            '-DCMAKE_CXX_FLAGS:STRING=%s' % (' '.join(cxx_flags)),
        ])

        # disable due to compiler / config errors:
        options.extend([
            '-DTrilinos_ENABLE_Pike=OFF'
        ])

        return options

    @run_after('install')
    def filter_python(self):
        """Strip PyTrilinos from the exported CMake component list."""
        # When trilinos is built with Python, libpytrilinos is included
        # through cmake configure files. Namely, Trilinos_LIBRARIES in
        # TrilinosConfig.cmake contains pytrilinos. This leads to a
        # run-time error: Symbol not found: _PyBool_Type and prevents
        # Trilinos to be used in any C++ code, which links executable
        # against the libraries listed in Trilinos_LIBRARIES. See
        # https://github.com/Homebrew/homebrew-science/issues/2148#issuecomment-103614509
        # A workaround is to remove PyTrilinos from the COMPONENTS_LIST :
        if '+python' in self.spec:
            filter_file(r'(SET\(COMPONENTS_LIST.*)(PyTrilinos;)(.*)',
                        (r'\1\3'),
                        '%s/cmake/Trilinos/TrilinosConfig.cmake' %
                        self.prefix.lib)
|
mfherbst/spack
|
var/spack/repos/builtin/packages/trilinos/package.py
|
Python
|
lgpl-2.1
| 32,336
|
[
"NetCDF"
] |
d0e3a3036e672ff76bcbaeea59535b9dafeaeb32348802c5d746b604e04aa0dc
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Thompson sampling for Bayesian Optimization with GPs
=============================================================
In this example we show how to implement Thompson sampling for Bayesian optimization with Gaussian processes.
The implementation is based on this tutorial: https://gdmarmerola.github.io/ts-for-bayesian-optim/
.. image:: ../_static/img/examples/thompson_sampling.png
:align: center
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import jax
import jax.numpy as jnp
import jax.random as random
from jax.scipy import linalg
import numpyro
import numpyro.distributions as dist
from numpyro.infer import SVI, Trace_ELBO
from numpyro.infer.autoguide import AutoDelta
numpyro.enable_x64()
# the function to be minimized. At y=0 to get a 1D cut at the origin
def ackley_1d(x, y=0):
    """Ackley test function; with y fixed at 0 this gives a 1D cut
    through the origin. The global minimum (value 0) is at x = y = 0."""
    radial = jnp.sqrt(0.5 * (x**2 + y**2))
    periodic = 0.5 * (jnp.cos(2 * jnp.pi * x) + jnp.cos(2 * jnp.pi * y))
    return -20 * jnp.exp(-0.2 * radial) - jnp.exp(periodic) + jnp.e + 20
# matern kernel with nu = 5/2
def matern52_kernel(X, Z, var=1.0, length=0.5, jitter=1.0e-6):
    """Matern covariance kernel with smoothness nu = 5/2 for 1-D inputs.

    Args:
        X: (n,) array of inputs.
        Z: (m,) array of inputs.
        var: kernel variance (signal amplitude squared).
        length: lengthscale.
        jitter: small diagonal term added for numerical stability; only
            meaningful when X is compared against itself (noise-free GP).

    Returns:
        (n, m) covariance matrix.
    """
    # Matern-5/2 in terms of d = sqrt(5) * |x - z| / length:
    #   k(d) = var * (1 + d + d**2 / 3) * exp(-d)
    # FIX: the original scaled by jnp.sqrt(0.5), which does not match the
    # nu = 5/2 kernel; the correct scaling is sqrt(5).
    # (sqrt(power(., 2)) is just the absolute value.)
    d = jnp.sqrt(5.0) * jnp.abs(X[:, None] - Z) / length
    k = var * (1 + d + (d**2) / 3) * jnp.exp(-d)
    if jitter:
        # we are assuming a noise free process, but add a small jitter for
        # numerical stability
        k += jitter * jnp.eye(X.shape[0])
    return k
def model(X, Y, kernel=matern52_kernel):
    """GP regression model: log-normal priors on the kernel
    hyperparameters and a zero-mean multivariate-normal likelihood."""
    # set uninformative log-normal priors on our kernel hyperparameters
    var = numpyro.sample("var", dist.LogNormal(0.0, 1.0))
    length = numpyro.sample("length", dist.LogNormal(0.0, 1.0))
    # evaluate the covariance over the training inputs
    cov = kernel(X, X, var, length)
    # sample Y according to the standard gaussian process formula
    numpyro.sample(
        "Y",
        dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=cov),
        obs=Y,
    )
class GP:
    """Gaussian process regression helper.

    Kernel hyperparameters are fitted by MAP (AutoDelta guide + SVI);
    predictions use the standard GP posterior formulas via a Cholesky
    factorization of the training covariance. ``fit`` must be called
    before ``predict`` or ``sample_y``.
    """

    def __init__(self, kernel=matern52_kernel):
        # kernel: callable (X, Z, **params) -> covariance matrix
        self.kernel = kernel
        # filled in by fit(): dict of MAP kernel hyperparameters
        self.kernel_params = None

    def fit(self, X, Y, rng_key, n_step):
        """Fit kernel hyperparameters by MAP on training data (X, Y).

        Args:
            X: training inputs.
            Y: training targets (normalized internally).
            rng_key: JAX PRNG key for the SVI run.
            n_step: number of optimization steps.

        Returns:
            dict of fitted kernel hyperparameters.
        """
        self.X_train = X
        # store moments of training y (to normalize)
        self.y_mean = jnp.mean(Y)
        self.y_std = jnp.std(Y)
        # normalize y
        Y = (Y - self.y_mean) / self.y_std
        # setup optimizer and SVI
        optim = numpyro.optim.Adam(step_size=0.005, b1=0.5)
        svi = SVI(
            model,
            guide=AutoDelta(model),
            optim=optim,
            loss=Trace_ELBO(),
            X=X,
            Y=Y,
        )
        params, _ = svi.run(rng_key, n_step)
        # get kernel parameters from guide with proper names
        self.kernel_params = svi.guide.median(params)
        # store cholesky factor of prior covariance
        self.L = linalg.cho_factor(self.kernel(X, X, **self.kernel_params))
        # store inverted prior covariance multiplied by y
        self.alpha = linalg.cho_solve(self.L, Y)
        return self.kernel_params

    # do GP prediction for a given set of hyperparameters. this makes use of the well-known
    # formula for gaussian process predictions
    def predict(self, X, return_std=False):
        """Posterior mean and either std (if return_std) or covariance at X.

        Results are de-normalized back to the original Y scale.
        """
        # compute kernels between train and test data, etc.
        k_pp = self.kernel(X, X, **self.kernel_params)
        k_pX = self.kernel(X, self.X_train, **self.kernel_params, jitter=0.0)
        # compute posterior covariance
        K = k_pp - k_pX @ linalg.cho_solve(self.L, k_pX.T)
        # compute posterior mean
        mean = k_pX @ self.alpha
        # we return both the mean function and the standard deviation
        if return_std:
            return (
                (mean * self.y_std) + self.y_mean,
                jnp.sqrt(jnp.diag(K * self.y_std**2)),
            )
        else:
            return (mean * self.y_std) + self.y_mean, K * self.y_std**2

    def sample_y(self, rng_key, X):
        """Draw one function sample from the GP posterior evaluated at X."""
        # get posterior mean and covariance
        y_mean, y_cov = self.predict(X)
        # draw one sample
        return jax.random.multivariate_normal(rng_key, mean=y_mean, cov=y_cov)
# our TS-GP optimizer
class ThompsonSamplingGP:
    """Adapted to numpyro from https://gdmarmerola.github.io/ts-for-bayesian-optim/"""

    # initialization
    def __init__(
        self, gp, n_random_draws, objective, x_bounds, grid_resolution=1000, seed=123
    ):
        # Gaussian Process
        self.gp = gp
        # number of random samples before starting the optimization
        self.n_random_draws = n_random_draws
        # the objective is the function we're trying to optimize
        self.objective = objective
        # the bounds tell us the interval of x we can work
        self.bounds = x_bounds
        # interval resolution is defined as how many points we will use to
        # represent the posterior sample
        # we also define the x grid
        self.grid_resolution = grid_resolution
        self.X_grid = np.linspace(self.bounds[0], self.bounds[1], self.grid_resolution)
        # also initializing our design matrix and target variable
        self.X = np.array([])
        self.y = np.array([])
        # JAX PRNG key; split before every random draw for reproducibility
        self.rng_key = random.PRNGKey(seed)

    # fitting process
    def fit(self, X, y, n_step):
        """Refit the wrapped GP on the observations collected so far."""
        self.rng_key, subkey = random.split(self.rng_key)
        # fitting the GP
        self.gp.fit(X, y, rng_key=subkey, n_step=n_step)
        # return the fitted model
        return self.gp

    # choose the next Thompson sample
    def choose_next_sample(self, n_step=2_000):
        """Run one Thompson-sampling iteration, observe the objective at the
        chosen point, and return
        (X, y, X_grid, posterior_sample, posterior_mean, posterior_std)."""
        # if we do not have enough samples, sample randomly from bounds
        if self.X.shape[0] < self.n_random_draws:
            self.rng_key, subkey = random.split(self.rng_key)
            next_sample = random.uniform(
                subkey, minval=self.bounds[0], maxval=self.bounds[1], shape=(1,)
            )
            # define dummy values for sample, mean and std to avoid errors when returning them
            posterior_sample = np.array([np.mean(self.y)] * self.grid_resolution)
            posterior_mean = np.array([np.mean(self.y)] * self.grid_resolution)
            posterior_std = np.array([0] * self.grid_resolution)
        # if we do, we fit the GP and choose the next point based on the posterior draw minimum
        else:
            # 1. Fit the GP to the observations we have
            self.gp = self.fit(self.X, self.y, n_step=n_step)
            # 2. Draw one sample (a function) from the posterior
            self.rng_key, subkey = random.split(self.rng_key)
            posterior_sample = self.gp.sample_y(subkey, self.X_grid)
            # 3. Choose next point as the optimum of the sample
            which_min = np.argmin(posterior_sample)
            next_sample = self.X_grid[which_min]
            # let us also get the std from the posterior, for visualization purposes
            posterior_mean, posterior_std = self.gp.predict(
                self.X_grid, return_std=True
            )
        # let us observe the objective and append this new data to our X and y
        next_observation = self.objective(next_sample)
        self.X = np.append(self.X, next_sample)
        self.y = np.append(self.y, next_observation)
        # returning values of interest
        return (
            self.X,
            self.y,
            self.X_grid,
            posterior_sample,
            posterior_mean,
            posterior_std,
        )
def main(args):
    """Run the Thompson-sampling loop on the 1D Ackley function and plot one
    panel per post-warmup iteration (samples, posterior draw, next point,
    uncertainty band)."""
    gp = GP(kernel=matern52_kernel)
    # do inference
    thompson = ThompsonSamplingGP(
        gp, n_random_draws=args.num_random, objective=ackley_1d, x_bounds=(-4, 4)
    )
    # one subplot per iteration after the initial random draws
    fig, axes = plt.subplots(
        args.num_samples - args.num_random, 1, figsize=(6, 12), sharex=True, sharey=True
    )
    for i in range(args.num_samples):
        (
            X,
            y,
            X_grid,
            posterior_sample,
            posterior_mean,
            posterior_std,
        ) = thompson.choose_next_sample(
            n_step=args.num_step,
        )
        # the first num_random iterations are random warmup draws; not plotted
        if i >= args.num_random:
            ax = axes[i - args.num_random]
            # plot training data
            ax.scatter(X, y, color="blue", marker="o", label="samples")
            # mark the minimizer of the posterior draw (the next sample point)
            ax.axvline(
                X_grid[posterior_sample.argmin()],
                color="blue",
                linestyle="--",
                label="next sample",
            )
            # true objective for reference
            ax.plot(X_grid, ackley_1d(X_grid), color="black", linestyle="--")
            ax.plot(
                X_grid,
                posterior_sample,
                color="red",
                linestyle="-",
                label="posterior sample",
            )
            # shade posterior mean +/- one posterior standard deviation
            ax.fill_between(
                X_grid,
                posterior_mean - posterior_std,
                posterior_mean + posterior_std,
                color="red",
                alpha=0.5,
            )
            ax.set_ylabel("Y")
            if i == args.num_samples - 1:
                ax.set_xlabel("X")
    plt.legend(
        loc="upper center",
        bbox_to_anchor=(0.5, -0.15),
        fancybox=True,
        shadow=True,
        ncol=3,
    )
    fig.suptitle("Thompson sampling")
    fig.tight_layout()
    plt.show()
if __name__ == "__main__":
    # guard against running the example against an incompatible numpyro release
    assert numpyro.__version__.startswith("0.9.0")

    arg_parser = argparse.ArgumentParser(description="Thompson sampling example")
    arg_parser.add_argument("--num-random", nargs="?", default=2, type=int,
                            help="number of random draws")
    arg_parser.add_argument("--num-samples", nargs="?", default=10, type=int,
                            help="number of Thompson samples")
    arg_parser.add_argument("--num-step", nargs="?", default=2_000, type=int,
                            help="number of steps for optimization")
    arg_parser.add_argument("--device", default="cpu", type=str,
                            help='use "cpu" or "gpu".')
    cli_args = arg_parser.parse_args()

    numpyro.set_platform(cli_args.device)
    main(cli_args)
|
pyro-ppl/numpyro
|
examples/thompson_sampling.py
|
Python
|
apache-2.0
| 10,104
|
[
"Gaussian"
] |
48990e3ad9c4be7f21e1b2e84fd9113c26dde43d6e7f632ea75fa435d3bbb67a
|
#!/usr/bin/env python
"""
xds_profile_mitai.py
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import os, sys, math, re
def write_xplor_map(res, NX, NZ, filename):
    """Write a 3D grid (res[z][y][x]) to filename in XPLOR map format.

    res : nested list indexed [z][y][x] with NX values along x and y and NZ
        sections along z.
    NX, NZ : grid dimensions (x/y shared, z separate).
    filename : output path.
    """
    ofs = open(filename, "w")
    ofs.write("\n")
    ofs.write(" 2 !NTITLE\n")
    ofs.write("REMARKS COMMENT1\n")
    ofs.write("REMARKS COMMENT2\n")
    # grid extents: x and y share the same 0..NX-1 range, z uses 0..NZ-1
    ofs.write(("%8d%8d%8d" %(NX-1,0,NX-1))*2 + "%8d%8d%8d" %(NZ-1,0,NZ-1) + "\n")
    # pseudo unit cell: lengths equal to grid sizes, orthogonal angles
    ofs.write("%12.5e%12.5e%12.5e%12.5e%12.5e%12.5e\n" % (NX,NX,NZ,90,90,90))
    ofs.write("ZYX\n")
    acc = []  # collects every written value for the trailing mean/stddev record
    for i in xrange(NZ):
        if i > 0:
            ofs.write("\n")
        ofs.write("%8d\n" % i)  # section index header
        count = 0
        for j in xrange(NX):
            for k in xrange(NX):
                try:
                    ofs.write("%12.5e" % res[i][j][k])
                    acc.append(res[i][j][k])
                except:
                    # unformattable (e.g. still-None) grid points are reported and skipped
                    print "EXCEPTION!=", i, j, k
                count += 1
                if count > 5:
                    # six values per line
                    ofs.write("\n")
                    count = 0
        ofs.write("\n")
    ofs.write("%8d\n" % -9999)  # end-of-sections sentinel
    mean = float(sum(acc))/len(acc)
    stddev = math.sqrt(sum(map(lambda x:(x-mean)**2, acc))/len(acc))
    ofs.write("%12.4f %12.4f \n" % (mean, stddev))
# write_xplor_map()
def run(intlp):
    """Parse averaged reflection profiles from an XDS INTEGRATE.LP file and
    write each as an XPLOR map plus a PyMOL script that loads them all."""
    lines = []         # raw table lines, one sub-list per profile block
    profile_ids = []   # parallel identifiers used to name the output files
    read_flag = False  # True while inside a profile table
    prefix = "profile"
    ofs_pml = open("load_%s.pml" % prefix, "w")
    for l in open(intlp):
        if "PROCESSING OF IMAGES " in l:
            from_to = map(lambda x:int(x.strip()), l.replace("PROCESSING OF IMAGES","").split("..."))
            print "images", from_to
            profile_ids.append("image%.3d-%.3d" % tuple(from_to))
        if "***** AVERAGE THREE-DIMENSIONAL PROFILE" in l or "RUN-AVERAGE OF PROFILE #" in l:
            r = re.search("\*\*\*\*\* RUN-AVERAGE OF PROFILE # ([1-9]) \*\*\*\*\*", l)
            if r:
                print "run-average", r.group(1)
                profile_ids.append("run_average_%s" % r.group(1))
            read_flag = True
            lines.append([])
            continue
        if "REFLECTION INTENSITIES INTEGRATED BY PROFILE FITTING" in l:
            read_flag = False
            continue
        if read_flag and re.search("[^-0-9 ]", l.rstrip()):
            # any non-numeric line terminates the current profile table
            read_flag = False
            continue
        if read_flag:
            lines[-1].append(l)
    print
    for i, (ll,pid) in enumerate(zip(lines, profile_ids)):
        vals = []
        for l in ll:
            # blank line starts a new row-group of sections
            if l.strip() == "" and (len(vals) == 0 or len(vals[-1]) != 0):
                vals.append([])
                continue
            # values are fixed-width, three characters each
            sp = re.findall("...", l.rstrip())
            tmp = []
            for s in sp:
                if s.strip() == "":
                    # all-blank column separates adjacent sections on one line
                    tmp.append([])
                else:
                    tmp[-1].append(int(s))
            if len(tmp) > 0:
                vals[-1].append(tmp)
        if len(vals) > 0 and len(vals[-1]) == 0:
            del vals[-1]
        nx = len(vals[0][0][0])
        nz = sum(map(lambda x:len(x[0]), vals))
        print pid, "nx, nz=", nx, nz
        data = [[[None for x in xrange(nx)] for y in xrange(nx)] for z in xrange(nz)] # [z][y][x]
        z,y,x = 0,0,0
        for iv, v in enumerate(vals):
            y = 0
            for ivv, vv in enumerate(v):
                z = len(vals[0][0]) * iv
                for vvv in vv:
                    assert len(data[z][y]) == len(vvv)
                    data[z][y] = vvv
                    #print z,y,data[z][y]
                    z += 1
                y += 1
        filename = "%s_%s_%.4d.xplor" % (prefix, pid, i+1)
        write_xplor_map(data, nx, nz, filename)
        ofs_pml.write("load %s, %s_%s_%d\n"%(filename, prefix, pid, i+1))
        ofs_pml.write("isomesh msh_%d, %s_%s_%d\n"%(i+1, prefix, pid, i+1))
    print
    print "Start:"
    print "pymol load_profile.pml"
# run()
if __name__ == "__main__":
    # usage: xds_profile_mitai.py INTEGRATE.LP
    if len(sys.argv) < 2:
        print "Usage: %s INTEGRATE.LP" % sys.argv[0]
        sys.exit()
    intlp_in = sys.argv[1]
    run(intlp_in)
|
keitaroyam/yam_scripts
|
xds_profile_mitai.py
|
Python
|
bsd-3-clause
| 4,154
|
[
"PyMOL"
] |
6ec1b2f2a08109588f1323ce7f16ce63e32d645a2e6e1406dcc3aab3b18523e5
|
# This notebook has commands to download external data and software or parse
# external data. These shouldn't be stored on figshare because they are
# available over the web. In some instances I parse some of the external data
# into more useful formats.
import os
import shutil
import subprocess
import sys
from urllib2 import urlopen
def _download_file(url, dest):
    """Download url and save the response body to the path dest."""
    req = urlopen(url)
    with open(dest, 'w') as d:
        shutil.copyfileobj(req, d)
def _download_and_gunzip(url, dest):
    """
    Download a gzipped file url to dest and gunzip it.
    Parameters
    ----------
    url : str
        URL for gzipped file to download.
    dest : str
        Full path to save gzipped file to. This file will be gunzipped.
    """
    # create the destination directory if it doesn't already exist
    try:
        os.makedirs(os.path.split(dest)[0])
    except OSError:
        pass
    req = urlopen(url)
    with open(dest, 'w') as d:
        shutil.copyfileobj(req, d)
    # gunzip replaces dest with the decompressed file
    subprocess.check_call(['gunzip', dest])
def _download_and_untar(url, dest, outdir, remove_tarball=False):
    """
    Download a tarball url to dest and decompress it in outdir.
    Parameters
    ----------
    url : str
        URL to tarball to download.
    dest : str
        Full path to save tarball to.
    outdir : str
        Directory to save tarball to and decompress to.
    remove_tarball : bool
        If True, remove tarball after decompressing.
    """
    req = urlopen(url)
    with open(dest, 'w') as d:
        shutil.copyfileobj(req, d)
    subprocess.check_call('tar -xf {} -C {}'.format(dest, outdir), shell=True)
    if remove_tarball:
        os.remove(dest)
def make_rna_seq_metrics_files(
    outdir,
    gencode_gtf,
    genome_fasta,
    picard_path='$picard',
    gtfToGenePred_path='gtfToGenePred',
):
    """Make refFlat file and rRNA interval list.

    Splits the Gencode GTF into rRNA exon intervals (rrna.bed) and everything
    else (rest.gtf), converts the non-rRNA annotation into a gzipped refFlat
    file, and converts the rRNA bed into a Picard interval list using a
    sequence dictionary built from genome_fasta.
    """
    import HTSeq
    import itertools as it
    rrna_fn = 'rrna.bed'
    rest_fn = 'rest.gtf'
    rrna = open(rrna_fn, 'w')
    rest = open(rest_fn, 'w')
    # True while the current records belong to a gene typed rRNA/Mt_rRNA
    gene_rrna = False
    gtf = it.islice(HTSeq.GFF_Reader(gencode_gtf), None)
    line = gtf.next()
    while line != '':
        if line.type == 'gene':
            if (line.attr['gene_type'] == 'rRNA' or
                line.attr['gene_type'] == 'Mt_rRNA'):
                gene_rrna = True
            else:
                gene_rrna = False
                rest.write(line.get_gff_line())
        else:
            if gene_rrna:
                # exon lines of rRNA genes go to the bed file
                # (start is shifted down by 1 here — verify against Picard's
                # expected coordinate convention)
                if line.type == 'exon':
                    rrna.write('\t'.join([
                        line.iv.chrom, str(line.iv.start - 1), str(line.iv.end),
                        line.name, '.', line.iv.strand]) + '\n')
            else:
                rest.write(line.get_gff_line())
        try:
            line = gtf.next()
        except StopIteration:
            line = ''
    rrna.close()
    rest.close()
    # convert the non-rRNA GTF to extended genePred format
    command = ('{} -ignoreGroupsWithoutExons -genePredExt -geneNameAsName2 {} '
               'refFlat.tmp.txt'.format(gtfToGenePred_path, rest_fn))
    subprocess.check_call(command, shell=True)
    out_fn = os.path.join(outdir, 'gencode_no_rRNA.txt.gz')
    # move the gene-name column (12) to the front to get refFlat layout;
    # process substitution requires bash
    command = ('paste <(cut -f 12 refFlat.tmp.txt) <(cut -f 1-10 '
               'refFlat.tmp.txt) | gzip > {}'.format(out_fn))
    subprocess.check_call(command, shell=True, executable='/bin/bash')
    os.remove(rest_fn)
    os.remove('refFlat.tmp.txt')
    # sequence dictionary is needed as the interval list header
    command = ('java -Xmx20g -jar -XX:-UseGCOverheadLimit -XX:-UseParallelGC '
               '-Djava.io.tmpdir=. -jar {} CreateSequenceDictionary '
               'REFERENCE={} OUTPUT=hg19.sam'.format(picard_path, genome_fasta))
    subprocess.check_call(command, shell=True)
    command = ('java -Xmx20g -jar -XX:-UseGCOverheadLimit -XX:-UseParallelGC '
               '-Djava.io.tmpdir=. -jar {} BedToIntervalList '
               'I={} SEQUENCE_DICTIONARY=hg19.sam OUTPUT={}'.format(
                   picard_path, rrna_fn, os.path.join(outdir, 'rrna.interval')))
    subprocess.check_call(command, shell=True)
    os.remove(rrna_fn)
    os.remove('hg19.sam')
def download_igvtools(outdir):
    """Download IGVTools 2.3.55 and unzip it in outdir."""
    url = ('http://data.broadinstitute.org/igv/projects/downloads/'
           'igvtools_2.3.55.zip')
    zip_path = os.path.join(outdir, 'igvtools_2.3.55.zip')
    _download_file(url, zip_path)
    subprocess.check_call(['unzip', '-d', outdir, zip_path])
def download_grasp_database(outdir):
    """Download the GRASP v2 full dataset, unzip it in outdir, and rename the
    extracted file with a .tsv extension."""
    url = 'https://s3.amazonaws.com/NHLBI_Public/GRASP/GraspFullDataset2.zip'
    zip_path = os.path.join(outdir, 'GraspFullDataset2.zip')
    _download_file(url, zip_path)
    subprocess.check_call(['unzip', '-d', outdir, zip_path])
    # give the extracted file a .tsv extension
    os.rename(os.path.join(outdir, 'GRASP2fullDataset'),
              os.path.join(outdir, 'GRASP2fullDataset.tsv'))
    os.remove(zip_path)
def download_nhgri_gwas_catalog(outdir):
    """Download the NHGRI GWAS catalog TSV into outdir."""
    url = 'https://www.ebi.ac.uk/gwas/api/search/downloads/alternative'
    _download_file(url, os.path.join(outdir, 'nhgri_gwas_catalog.tsv'))
def download_encode_blacklist(outdir):
    """Download and gunzip the ENCODE blacklist BED (ENCFF001TDO) into outdir."""
    url = ('https://www.encodeproject.org/files/ENCFF001TDO/@@download/'
           'ENCFF001TDO.bed.gz')
    _download_and_gunzip(url, os.path.join(outdir, 'encode_blacklist.bed.gz'))
def download_blat(outdir):
    """Download the UCSC blat binary into outdir and make it executable."""
    url = 'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64.v287/blat/blat'
    binary = os.path.join(outdir, 'blat')
    _download_file(url, binary)
    subprocess.check_call(['chmod', '755', binary])
def download_weblogo(outdir):
    """
    Download weblogo 2.8.2 and untar it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save weblogo to.
    """
    url = 'http://weblogo.berkeley.edu/release/weblogo.2.8.2.tar.gz'
    _download_and_untar(url, os.path.join(outdir, 'weblogo'), outdir)
def download_epacts(outdir):
    """
    Download and install EPACTS. bin is put in the source directory. If you have
    a specific version of R you are using with a project, you should probably
    have that R in your path. I think EPACTS may install an R package into
    whatever R it finds in your path although I'm not sure about this.
    Parameters
    ----------
    outdir : str
        Directory to save EPACTS to.
    """
    src = ('http://csg.sph.umich.edu/kang/epacts/download/EPACTS-3.2.6.tar.gz')
    dest = os.path.join(outdir, 'EPACTS-3.2.6.tar.gz')
    _download_and_untar(src, dest, outdir)
    cwd = os.getcwd()
    edir = os.path.join(outdir, 'EPACTS-3.2.6')
    os.chdir(edir)
    # configure to install into the source directory itself
    subprocess.check_call('./configure --prefix={}'.format(edir), shell=True)
    subprocess.check_call('make > EPACTS_make.out 2> EPACTS_make.err',
                          shell=True)
    subprocess.check_call(('make install > EPACTS_make_install.out 2> '
                           'EPACTS_make_install.err'), shell=True)
    os.chdir(cwd)
def download_rsem(outdir, lncurses=False):
    """
    Download RSEM 1.2.20 and compile it in outdir.
    Parameters
    ----------
    outdir : str
        Directory to save RSEM.
    lncurses : bool
        Set to true to use lncurses rather than lcurses to build RSEM. See
        http://seqanswers.com/forums/showthread.php?t=6669 for more information.
    """
    src = ('http://deweylab.biostat.wisc.edu/rsem/src/rsem-1.2.20.tar.gz')
    dest = os.path.join(outdir, 'rsem-1.2.20.tar.gz')
    _download_and_untar(src, dest, outdir)
    cwd = os.getcwd()
    os.chdir(os.path.join(outdir, 'rsem-1.2.20'))
    if lncurses:
        # patch the bundled samtools Makefile to link ncurses instead of curses
        f = open(os.path.join('sam', 'Makefile'), 'r')
        lines = f.read().replace('lcurses', 'lncurses')
        f.close()
        f = open(os.path.join('sam', 'Makefile'), 'w')
        f.write(lines)
        f.close()
    # string without shell=True runs the 'make' binary with no arguments
    subprocess.check_call('make')
    os.chdir(cwd)
def download_fastx_toolkit(outdir):
    """
    Download FASTX Toolkit 0.0.14 and decompress it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save FASTX Toolkit.
    """
    url = ('https://github.com/agordon/fastx_toolkit/releases/download/'
           '0.0.14/fastx_toolkit-0.0.14.tar.bz2')
    tarball = os.path.join(outdir, 'fastx_toolkit-0.0.14.tar.bz2')
    _download_and_untar(url, tarball, outdir)
def download_fastqc(outdir):
    """
    Download FastQC v0.11.2 and unzip it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save FastQC to.
    """
    # NOTE: docstring previously said "Download Gencode GTF" — copy-paste error.
    src = ('http://www.bioinformatics.babraham.ac.uk/projects/fastqc/'
           'fastqc_v0.11.2.zip')
    dest = os.path.join(outdir, 'fastqc_v0.11.2.zip')
    req = urlopen(src)
    with open(dest, 'w') as f:
        shutil.copyfileobj(req, f)
    subprocess.check_call(['unzip', '-d', outdir, dest])
def download_snpeff(outdir):
    """
    Download snpEff v4.1g and unzip it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save snpEff to.
    """
    url = ('http://sourceforge.net/projects/snpeff/'
           'files/snpEff_v4_1g_core.zip/download')
    zip_path = os.path.join(outdir, 'snpEff_v4_1g_core.zip')
    _download_file(url, zip_path)
    subprocess.check_call(['unzip', '-d', outdir, zip_path])
def download_vcftools(outdir):
    """
    Download and compile vcftools.
    Parameters
    ----------
    outdir : str
        Directory to save vcftools to.
    """
    url = ('http://sourceforge.net/projects/vcftools/'
           'files/vcftools_0.1.12b.tar.gz/download')
    dest = os.path.join(outdir, 'vcftools_0.1.12b.tar.gz')
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    os.chdir(os.path.join(outdir, 'vcftools_0.1.12b'))
    # string without shell=True runs the 'make' binary with no arguments
    subprocess.check_call('make')
    os.chdir(cwd)
def download_subread(outdir):
    """
    Download the Subread 1.4.6 Linux binaries (includes featureCounts) and
    untar them in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save Subread to.
    """
    url = ('http://sourceforge.net/projects/subread/files/subread-1.4.6/'
           'subread-1.4.6-Linux-x86_64.tar.gz/download')
    tarball = os.path.join(outdir, 'subread-1.4.6-Linux-x86_64.tar.gz')
    _download_and_untar(url, tarball, outdir)
def download_bcftools(outdir):
    """
    Download and compile bcftools.
    Parameters
    ----------
    outdir : str
        Directory to save bcftools to.
    """
    url = ('https://github.com/samtools/bcftools/releases/download/1.2/'
           'bcftools-1.2.tar.bz2')
    dest = os.path.join(outdir, 'bcftools-1.2.tar.bz2')
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    edir = os.path.join(outdir, 'bcftools-1.2')
    os.chdir(edir)
    # build, then install into the source directory itself
    subprocess.check_call('make > make.out 2> make.err', shell=True)
    subprocess.check_call(('make prefix={} install > make_install.out 2> '
                           'make_install.err'.format(edir)), shell=True)
    os.chdir(cwd)
def download_htslib(outdir):
    """
    Download and compile htslib 1.2.1.

    Parameters
    ----------
    outdir : str
        Directory to save htslib to.
    """
    url = ('https://github.com/samtools/htslib/releases/download/1.2.1/'
           'htslib-1.2.1.tar.bz2')
    _download_and_untar(url, os.path.join(outdir, 'htslib-1.2.1.tar.bz2'),
                        outdir)
    original_dir = os.getcwd()
    build_dir = os.path.join(outdir, 'htslib-1.2.1')
    os.chdir(build_dir)
    # build, then install into the build directory itself
    subprocess.check_call('make > make.out 2> make.err', shell=True)
    subprocess.check_call(('make prefix={} install > make_install.out 2> '
                           'make_install.err'.format(build_dir)), shell=True)
    os.chdir(original_dir)
def download_samtools(outdir, lncurses=False):
    """
    Download and compile samtools.
    Parameters
    ----------
    outdir : str
        Directory to save samtools to.
    lncurses : bool
        Set to true to use lncurses rather than lcurses to build samtools. See
        http://seqanswers.com/forums/showthread.php?t=6669 for more information.
    """
    url = ('https://github.com/samtools/samtools/releases/download/1.2/'
           'samtools-1.2.tar.bz2')
    dest = os.path.join(outdir, 'samtools-1.2.tar.bz2')
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    edir = os.path.join(outdir, 'samtools-1.2')
    os.chdir(edir)
    if lncurses:
        # rewrite the Makefile to link against ncurses instead of curses
        f = open('Makefile', 'r')
        lines = f.read().replace('lcurses', 'lncurses')
        f.close()
        f = open('Makefile', 'w')
        f.write(lines)
        f.close()
    # build, then install into the source directory itself
    subprocess.check_call('make > make.out 2> make.err', shell=True)
    subprocess.check_call(('make prefix={} install > make_install.out 2> '
                           'make_install.err'.format(edir)), shell=True)
    os.chdir(cwd)
def download_wgEncodeCrgMapabilityAlign100mer(outdir):
    """
    Download wgEncodeCrgMapabilityAlign100mer mappability bigwig track.

    Parameters
    ----------
    outdir : str
        Directory to save wgEncodeCrgMapabilityAlign100mer mappability bigwig
        track to.
    """
    url = ('http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/'
           'wgEncodeMapability/wgEncodeCrgMapabilityAlign100mer.bigWig')
    _download_file(
        url, os.path.join(outdir, 'wgEncodeCrgMapabilityAlign100mer.bigWig'))
def download_hg19(
    outdir,
    create_sorted=True,
    samtools_path='samtools',
    picard_path='$picard',
    twoBitToFa_path='twoBitToFa',
):
    """
    Download hg19, create index, and sequence dictionary files.
    Parameters
    ----------
    outdir : str
        Directory to save hg19 fasta to.
    create_sorted : bool
        Create hg19_sorted.fa version that has chromosomes ordered
        lexicographically as expected by bedtools. The fasta is indexed and a
        sequence dictionary is created as well.
    samtools_path : str
        Path to Samtools executable needed to index fasta.
    picard_path : str
        Path to Picard jar used to create sequence dictionaries.
    twoBitToFa_path : str
        Path to UCSC twoBitToFa executable.
    """
    try:
        os.makedirs(outdir)
    except OSError:
        pass
    req = urlopen(
        'http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.2bit')
    dest = os.path.join(outdir, 'hg19.2bit')
    with open(dest, 'w') as d:
        shutil.copyfileobj(req, d)
    cwd = os.getcwd()
    os.chdir(outdir)
    # req = urlopen(
    #     'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/twoBitToFa')
    # dest = os.path.join(outdir, 'twoBitToFa')
    # with open(dest, 'w') as d:
    #     shutil.copyfileobj(req, d)
    # subprocess.check_call('chmod 755 {}'.format(os.path.join(outdir,
    #                                                          'twoBitToFa')),
    #                       shell=True)
    # Convert from 2bit to fasta.
    subprocess.check_call('{} hg19.2bit hg19.fa'.format(twoBitToFa_path),
                          shell=True)
    # Index fasta.
    subprocess.check_call('{} faidx hg19.fa'.format(samtools_path), shell=True)
    # Create sequence dictionary.
    c = ('java -jar {} CreateSequenceDictionary R=hg19.fa '
         'O=hg19.dict'.format(picard_path))
    subprocess.check_call(c, shell=True)
    if create_sorted:
        # Sort fasta lexographically. This is the native ordering for bedtools
        # etc.
        c = ('grep \\> hg19.fa | cut -d ">" -f 2 | sort | '
             'awk \'{print $1".fa"}\' > order.txt')
        subprocess.check_call(c, shell=True)
        # split hg19.fa into one file per chromosome
        c = ('awk \'{if (substr($1, 1, 1) == ">") {x=substr($1, 2)}} '
             '{print $0 >> x".fa"}\' hg19.fa')
        subprocess.check_call(c, shell=True)
        # concatenate the per-chromosome files in sorted order
        c = ('xargs -d\'\\n\' cat <order.txt >> hg19_sorted.fa')
        subprocess.check_call(c, shell=True)
        subprocess.check_call('rm chr*.fa', shell=True)
        subprocess.check_call('rm order.txt', shell=True)
        # Index sorted fasta.
        subprocess.check_call('{} faidx hg19_sorted.fa'.format(samtools_path),
                              shell=True)
        # Create sequence dictionary.
        c = ('java -jar {} CreateSequenceDictionary R=hg19_sorted.fa '
             'O=hg19_sorted.dict'.format(picard_path))
        subprocess.check_call(c, shell=True)
    os.chdir(cwd)
def download_htsjdk(outdir):
    """
    Download and build htsjdk from the samtools/htsjdk master tarball.

    Parameters
    ----------
    outdir : str
        Directory to save htsjdk to.
    """
    # NOTE: docstring previously said "Download STAR aligner" — copy-paste error.
    url = 'https://github.com/samtools/htsjdk/tarball/master'
    dest = os.path.join(outdir, 'samtools-htsjdk-1.127-10-g18192d8.tar.gz')
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    os.chdir(os.path.join(outdir, 'samtools-htsjdk-18192d8'))
    # build the jar with ant
    subprocess.check_call(['ant', 'htsjdk-jar'])
    os.chdir(cwd)
def download_star(outdir):
    """
    Download the STAR aligner 2.4.0h source and untar it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save STAR to.
    """
    url = 'https://github.com/alexdobin/STAR/archive/STAR_2.4.0h.tar.gz'
    _download_and_untar(url, os.path.join(outdir, 'STAR_2.4.0h.tar.gz'),
                        outdir)
def make_star_index(
    outdir,
    genome,
    gtf,
    threads=1,
    star_path='STAR'):
    """
    Make index for STAR aligner.
    Parameters
    ----------
    outdir : str
        Directory to save index to.
    genome : str
        Path to genome fasta file.
    gtf : str
        Path to GTF gene annotation.
    threads : int
        Number of threads to use to make the index.
    star_path : str
        Path to STAR executable.
    """
    try:
        os.makedirs(outdir)
    except OSError:
        pass
    subprocess.check_call([star_path, '--runThreadN', str(threads),
                           '--runMode', 'genomeGenerate',
                           '--sjdbOverhang', '250', '--genomeDir',
                           outdir, '--genomeFastaFiles',
                           genome, '--sjdbGTFfile', gtf],
                          shell=False)
    cwd = os.getcwd()
    # STAR writes Log.out into the working directory; move it next to the
    # index unless we are already in outdir
    if os.path.realpath(cwd) != os.path.realpath(outdir):
        shutil.move('Log.out', outdir)
def download_picard(outdir):
    """
    Download Picard tools.
    Parameters
    ----------
    outdir : str
        Directory to save Picard tools to.
    """
    url = 'https://github.com/broadinstitute/picard/archive/1.131.tar.gz'
    dest = os.path.join(outdir, '1.131.tar.gz')
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    os.chdir(os.path.join(outdir, 'picard-1.131'))
    # build the command-line jars with ant (also clones htsjdk)
    subprocess.check_call('ant -lib lib/ant clone-htsjdk package-commands',
                          shell=True)
    os.chdir(cwd)
def download_bedtools(outdir):
    """
    Download Bedtools.
    Parameters
    ----------
    outdir : str
        Directory to save Bedtools to.
    """
    url = ('https://github.com/arq5x/bedtools2/releases/download/v2.23.0/'
           'bedtools-2.23.0.tar.gz')
    dest = os.path.join(outdir, 'bedtools-2.23.0.tar.gz')
    _download_and_untar(url, dest, outdir)
    # the tarball extracts as 'bedtools2'; rename to a versioned directory
    os.rename(os.path.join(outdir, 'bedtools2'),
              os.path.join(outdir, 'bedtools2-2.23.0'))
    cwd = os.getcwd()
    os.chdir(os.path.join(outdir, 'bedtools2-2.23.0'))
    subprocess.check_call('make')
    os.chdir(cwd)
    # interactive pause reminding the user to update PATH for pybedtools
    raw_input('\n\n\nYou should add\n' +
              os.path.join(outdir, 'bedtools2-2.23.0', 'bin') +
              '\nto your path when using this environment so\n'
              'pybedtools uses the correct bedtools installation.\n'
              'Press any key to continue.\n\n\n')
def download_r(outdir):
    """
    Download and compile R 3.1.1 from source in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save R to. The source tree ends up in
        outdir/R-3.1.1-source and the installation in outdir/R-3.1.1.
    """
    rbase = 'R-3.1.1'
    url = 'http://cran.stat.ucla.edu/src/base/R-3/R-3.1.1.tar.gz'
    dest = os.path.join(outdir, '{}.tar.gz'.format(rbase))
    _download_and_untar(url, dest, outdir)
    cwd = os.getcwd()
    os.chdir(outdir)
    # keep the source tree separate from the install prefix
    shutil.move(rbase, '{}-source'.format(rbase))
    rpath = os.getcwd()
    os.chdir('{}-source'.format(rbase))
    # BUG FIX: the configure command was previously passed to check_call as a
    # TUPLE of two strings (a trailing comma split the command), so with
    # shell=True the stderr redirection '2> R_configure.err' was handed to the
    # shell as a positional argument instead of being part of the command.
    # Concatenate everything into a single shell command string.
    subprocess.check_call('./configure ' +
                          '--enable-R-shlib ' +
                          '--prefix={}/R-3.1.1 '.format(rpath) +
                          '> R_configure.out ' +
                          '2> R_configure.err',
                          shell=True)
    subprocess.check_call('make > R_make.out 2> R_make.err', shell=True)
    # subprocess.check_call('make check > R_check.out 2> R_check.err',
    #                       shell=True)
    subprocess.check_call('make install > R_install.out 2> R_install.err',
                          shell=True)
    os.chdir(cwd)
# # I execute the following python file when starting
# # the notebook server to set the library path:
# #
# # import os
# # import subprocess
# #
# # import projectpy as ppy
# #
# # spath = os.path.join(ppy.root, 'software')
# # subprocess.check_call('export LDFLAGS="-Wl,-rpath,' +
# # '{}/R-3.1.1/lib64/R/lib"'.format(spath),
# # shell=True)
# # subprocess.check_call('export LD_LIBRARY_PATH="' +
# # '{}/R-3.1.1/lib64/R/lib:$LD_LIBRARY_PATH"'.format(spath),
# # shell=True)
#
# # In[9]:
#
# try:
# import rpy2.robjects as r
# except:
# cwd = os.getcwd()
# os.chdir(os.path.join(ppy.root, 'software'))
# spath = os.getcwd()
# """
# subprocess.check_call('export LDFLAGS="-Wl,-rpath,' +
# '{}/R-3.1.1/lib64/R/lib"'.format(spath),
# shell=True)
# subprocess.check_call('export LD_LIBRARY_PATH="' +
# '{}/R-3.1.1/lib64/R/lib:$LD_LIBRARY_PATH"'.format(spath),
# shell=True)
# """
# req = urlopen('https://pypi.python.org/packages/'
# 'source/r/rpy2/rpy2-2.4.2.tar.gz')
# dest = os.path.join(ppy.root, 'software', 'rpy2-2.4.2.tar.gz')
# with open(dest, 'w') as d:
# shutil.copyfileobj(req, d)
# subprocess.check_call('tar -xf {} -C {}'.format(dest, os.path.split(dest)[0]),
# shell=True)
# os.chdir('rpy2-2.4.2')
# subprocess.check_call('python setup.py build --r-home ' +
# '{}/R-3.1.1 install >& '.format(spath) +
# 'rpy2-2.4.2_R-3.1.1_log.txt', shell=True)
# os.chdir(cwd)
#
#
# # Note that you may have to restart the notebook at this
# # point if you are installing rpy2 for the first time so
# # that python knows the package exists.
#
# # In[11]:
#
# get_ipython().magic(u'load_ext rpy2.ipython')
#
#
# # # Bioconductor
# #
# # I'm going to install Bioconductor and some packages
# # that I'll use.
#
# # In[15]:
#
# get_ipython().run_cell_magic(u'R', u'', u'\nsource("http://bioconductor.org/biocLite.R")\nbiocLite(ask=FALSE)\nbiocLite("DEXSeq", ask=FALSE)\nbiocLite("Gviz", ask=FALSE)\nbiocLite("BiocParallel", ask=FALSE)')
def download_install_rpy2(r_path, outdir):
    """
    Download and install rpy2. R must be installed and the LDFLAGS and
    LD_LIBRARY_PATH must be set. If they are not set, you can run the method to
    get instructions on how to set them.
    Parameters
    ----------
    r_path : str
        Path to R executable. The R shared library will be inferred based on
        this.
    outdir : str
        Directory to download the rpy2 source tarball into.
    """
    cwd = os.getcwd()
    # Path to R shared library.
    s_path = os.path.join(os.path.split(os.path.split(r_path)[0])[0], 'lib64',
                          'R', 'lib')
    lines = ('\n\n\nrpy2 has to be compiled against the version of R you\'ve \n'
             'installed here. Note that you have to set ld flags and library\n'
             'paths for installation. If you haven\'t already, paste the\n'
             'following at the command line:\n\n')
    sys.stdout.write(lines)
    command = 'export PATH="{}:$PATH"\n'.format(os.path.split(r_path)[0])
    sys.stdout.write(command)
    command = 'export LDFLAGS="-Wl,-rpath,{}"\n'.format(s_path)
    sys.stdout.write(command)
    command = 'export LD_LIBRARY_PATH="{}:$LD_LIBRARY_PATH"\n\n'.format(s_path)
    sys.stdout.write(command)
    # interactive pause so the user can set the environment variables first
    raw_input('If these environment variables are set correctly, press any\n'
              'key to continue. Otherwise, exit and set them, then rerun.\n'
              'You need to set the PATH and LD_LIBRARY_PATH for each bash\n'
              'environment you want to use rpy2 in.\n\n')
    sys.stdout.write('Downloading and installing rpy2.\n\n')
    sys.stdout.flush()
    url = ('https://pypi.python.org/packages/source/r/rpy2/rpy2-2.7.3.tar.gz')
    dest = os.path.join(outdir, 'rpy2-2.7.3.tar.gz')
    _download_and_untar(url, dest, outdir)
    os.chdir(os.path.join(outdir, 'rpy2-2.7.3'))
    # point the build at the R installation inferred from r_path
    r_home = os.path.split(os.path.split(r_path)[0])[0]
    subprocess.check_call('python setup.py build --r-home ' +
                          '{} install >& '.format(r_home) +
                          'rpy2_install_log.txt', shell=True)
    os.chdir(cwd)
def download_gencode_gtf(outdir):
    """
    Download the Gencode v19 annotation GTF and gunzip it in outdir.

    Parameters
    ----------
    outdir : str
        Directory to save Gencode GTF to.
    """
    url = ('ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/'
           'gencode.v19.annotation.gtf.gz')
    _download_and_gunzip(
        url, os.path.join(outdir, 'gencode.v19.annotation.gtf.gz'))
def download_kheradpour_motifs(outdir):
    """Download the Kheradpour ENCODE motif collection into outdir."""
    url = 'http://compbio.mit.edu/encode-motifs/motifs.txt'
    _download_file(url, os.path.join(outdir, 'motifs.txt'))
def download_gtfToGenePred(outdir):
    """Download the UCSC gtfToGenePred binary and make it executable."""
    url = ('http://hgdownload.cse.ucsc.edu/admin/exe/'
           'linux.x86_64/gtfToGenePred')
    binary = os.path.join(outdir, 'gtfToGenePred')
    _download_file(url, binary)
    subprocess.check_call(['chmod', '755', binary])
def download_liftOver(outdir):
    """Download the UCSC liftOver binary and make it executable."""
    url = ('http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'
           'liftOver')
    binary = os.path.join(outdir, 'liftOver')
    _download_file(url, binary)
    subprocess.check_call(['chmod', '755', binary])
def download_bigWigAverageOverBed(outdir):
    """Download the UCSC bigWigAverageOverBed binary and make it executable."""
    url = ('http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'
           'bigWigAverageOverBed')
    binary = os.path.join(outdir, 'bigWigAverageOverBed')
    _download_file(url, binary)
    subprocess.check_call(['chmod', '755', binary])
def download_ucsc_tools(outdir):
    """Download all UCSC executables except blat."""
    import re
    url = 'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64'
    # scrape the anchor tags from the directory-listing HTML
    s = urlopen(url).read()
    compiled = re.compile('<a href=".*">')
    res = compiled.findall(s)
    # keep the href target of each anchor; the first two entries are skipped
    # (presumably header/parent-directory links — verify against the listing)
    res = [x.split('"')[-2] for x in res[2:]]
    # skip entries pointing at subdirectories
    res = [x for x in res if '/' not in x]
    to_download = ['{}/{}'.format(url, x) for x in res]
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        _download_file(src, dest)
        subprocess.check_call(['chmod', '755', '{}'.format(dest)])
def download_bedGraphToBigWig(outdir):
    """
    Download the UCSC bedGraphToBigWig executable and mark it executable.

    Parameters
    ----------
    outdir : str
        Directory to save the executable to.
    """
    req = urlopen('http://hgdownload.cse.ucsc.edu/admin/exe/'
                  'linux.x86_64/bedGraphToBigWig')
    dest = os.path.join(outdir, 'bedGraphToBigWig')
    # Binary mode: the payload is a compiled executable, not text.
    with open(dest, 'wb') as d:
        shutil.copyfileobj(req, d)
    # rwxr-xr-x; os.chmod avoids spawning a subprocess just for chmod.
    os.chmod(dest, 0o755)
def install_bioconductor_dependencies():
    """
    Install Bioconductor plus several Bioconductor packages into R via rpy2.

    Installs the base Bioconductor packages, then DESeq2, DEXSeq, Gviz,
    goseq, org.Hs.eg.db, and qvalue. Exits the process if rpy2 is missing.
    """
    try:
        # Have to import readline due to some weirdness with installing rpy2 in
        # a conda environment etc.
        # See https://github.com/ContinuumIO/anaconda-issues/issues/152.
        import readline
        import rpy2.robjects as robjects
    except ImportError:
        sys.stderr.write('rpy2 not installed.\n')
        sys.exit(1)
    robjects.r('source("http://bioconductor.org/biocLite.R")')
    robjects.r('biocLite(ask=FALSE)')
    for package in ('DESeq2', 'DEXSeq', 'Gviz', 'goseq',
                    'org.Hs.eg.db', 'qvalue'):
        robjects.r('biocLite("{}", ask=FALSE)'.format(package))
def make_dexseq_annotation(gtf, out_gff):
    """
    Create a DEXSeq exon-counting GFF annotation from a GTF file.

    Runs the dexseq_prepare_annotation.py script shipped inside the DEXSeq
    R package (located via rpy2). Exits the process if rpy2 is missing.

    Parameters
    ----------
    gtf : str
        Path to the input GTF file.
    out_gff : str
        Path for the output GFF annotation.
    """
    try:
        # Have to import readline due to some weirdness with installing rpy2 in
        # a conda environment etc.
        # See https://github.com/ContinuumIO/anaconda-issues/issues/152.
        import readline
        import rpy2.robjects as robjects
    except ImportError:
        sys.stderr.write('rpy2 not installed.\n')
        sys.exit(1)
    robjects.r('library(DEXSeq)')
    scripts = robjects.r('system.file("python_scripts", package="DEXSeq")')
    g = scripts.items()
    # Builtin next() works on both Python 2 and 3; g.next() was 2.x-only.
    scripts_path = next(g)[1]
    script = os.path.join(scripts_path, 'dexseq_prepare_annotation.py')
    command = 'python {} -r no {} {}'.format(script, gtf, out_gff)
    subprocess.check_call(command, shell=True)
def rsem_prepare_reference(
    fasta,
    name,
    gtf=None,
    rsem_prepare_reference_path='rsem-prepare-reference',
):
    """
    Run rsem-prepare-reference to make an RSEM reference.

    Parameters
    ----------
    fasta : str
        Path to fasta to provide to RSEM. Should be a transcriptome fasta unless
        gtf is specified. If gtf is specified, then this is a genome fasta.
    name : str
        Name of the RSEM reference.
    gtf : str
        Path to GTF file that defines genes and transcripts to provide to RSEM.
    rsem_prepare_reference_path : str
        Path to the rsem-prepare-reference executable (defaults to finding it
        on PATH).
    """
    command = '{} {} {}'.format(rsem_prepare_reference_path, fasta, name)
    if gtf:
        command += ' --gtf {}'.format(gtf)
    subprocess.check_call(command, shell=True)
def download_roadmap_gwas(outdir):
    """
    Download GWAS data compiled for Roadmap project.

    Parameters
    ----------
    outdir : str
        Directory to save the (gunzipped) GWAS files to. Created if needed.
    """
    import re
    try:
        os.makedirs(outdir)
    except OSError:
        # Directory already exists.
        pass
    url = 'http://www.broadinstitute.org/~anshul/roadmap/gwas'
    s = urlopen(url).read()
    # Raw string, and [^"]* instead of greedy .* so a single HTML line with
    # several quoted filenames can't be swallowed into one match.
    compiled = re.compile(r'"EUR[^"]*txt\.gz"')
    res = [x.strip('"') for x in compiled.findall(s)]
    to_download = ['{}/{}'.format(url, x) for x in res]
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        _download_and_gunzip(src, dest)
def download_roadmap_25_state_chromatin_model(outdir):
    """
    Download 25 state chromatin model from Roadmap Epigenomics. There is a bed
    file for each cell type as well as an annotation file with state information
    and file to convert the roadmap ID's to human readable cell types. Bed files
    are sorted so they work with bedtools -sorted option.

    Parameters
    ----------
    outdir : str
        Directory to save files to. Created if needed.
    """
    import re
    try:
        os.makedirs(outdir)
    except OSError:
        # Directory already exists.
        pass
    pattern = r'href="E\d\d\d_25_imputed12marks_mnemonics.bed.gz"'
    compiled = re.compile(pattern)
    url = ('http://egg2.wustl.edu/roadmap/data/byFileType'
           '/chromhmmSegmentations/ChmmModels/imputed12marks/jointModel/final')
    s = urlopen(url).read()
    res = compiled.findall(s)
    # One imputed segmentation per Roadmap epigenome (127 total). Use ==:
    # "is" compares object identity, not numeric equality.
    assert len(res) == 127
    res = [x[5:].strip('"') for x in res]
    to_download = ['{}/{}'.format(url, x) for x in res]
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        # str.strip removes a *set of characters* from both ends, not a
        # suffix, so slice the known extensions off explicitly.
        unzipped = dest[:-len('.gz')]
        sorted_dest = '{}_sorted.bed'.format(unzipped[:-len('.bed')])
        _download_and_gunzip(src, dest)
        subprocess.check_call('sort -k 1,1 -k2,2n {} > {}'.format(
            unzipped, sorted_dest), shell=True)
        os.remove(unzipped)
    to_download = []
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/imputed12marks/'
                       'jointModel/final/EIDlegend.txt')
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/imputed12marks/'
                       'jointModel/final/annotation_25_imputed12marks.txt')
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        _download_file(src, dest)
    # Now I'll make a more useful annotation file that has better column names
    # and a few extra columns.
    import numpy as np
    import pandas as pd
    # Read the file we actually downloaded above; it has no .tsv suffix.
    state_annot = pd.read_table(
        os.path.join(outdir, 'annotation_25_imputed12marks.txt'),
        index_col=0)
    state_annot.index.name = 'state_number'
    state_annot.columns = [x.lower().replace(' ', '_') for x in
                           state_annot.columns]
    state_annot['name'] = (state_annot.index.astype(str).values + '_' +
                           state_annot.mnemonic)
    state_annot['color_code_norm'] = state_annot['color_code'].apply(
        lambda x: np.array([int(x) for x in x.split(',')]))
    # Normalize RGB values into [0, 1] (e.g. for matplotlib colors).
    state_annot['color_code_norm'] = state_annot['color_code_norm'].apply(
        lambda x: x / 255.)
    state_annot['color_code_norm'] = state_annot.color_code_norm.apply(
        lambda x: ','.join([str(y) for y in x]))
    state_annot.to_csv(os.path.join(outdir, 'frazer_parsed_annotation.tsv'),
                       sep='\t')
def download_roadmap_18_state_chromatin_model(outdir):
    """
    Download 18 state chromatin model from Roadmap Epigenomics. There is a bed
    file for each cell type as well as an annotation file with state information
    and file to convert the roadmap ID's to human readable cell types. Bed files
    are sorted so they work with bedtools -sorted option.

    Parameters
    ----------
    outdir : str
        Directory to save files to. Created if needed.
    """
    import re
    try:
        os.makedirs(outdir)
    except OSError:
        # Directory already exists.
        pass
    pattern = r'href="E\d\d\d_18_core_K27ac_mnemonics.bed.gz"'
    compiled = re.compile(pattern)
    url = ('http://egg2.wustl.edu/roadmap/data/byFileType/'
           'chromhmmSegmentations/ChmmModels/core_K27ac/jointModel/final')
    s = urlopen(url).read()
    res = compiled.findall(s)
    res = [x[5:].strip('"') for x in res]
    to_download = ['{}/{}'.format(url, x) for x in res]
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        # str.strip removes a *set of characters* from both ends, not a
        # suffix, so slice the known extensions off explicitly.
        unzipped = dest[:-len('.gz')]
        sorted_dest = '{}_sorted.bed'.format(unzipped[:-len('.bed')])
        _download_and_gunzip(src, dest)
        subprocess.check_call('sort -k 1,1 -k2,2n {} > {}'.format(
            unzipped, sorted_dest), shell=True)
        os.remove(unzipped)
    to_download = []
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/core_K27ac/'
                       'jointModel/final/browserlabelmap_18_core_K27ac.tab')
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/core_K27ac/jointModel/'
                       'final/colormap_18_core_K27ac.tab')
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/imputed12marks/'
                       'jointModel/final/EIDlegend.txt')
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        _download_file(src, dest)
    # The 18 state model doesn't have a nice annotation file like the 25 state
    # model (although the table is on the website), so I'm just putting the info
    # in here and I'll write the file myself.
    columns = [u'STATE NO.', u'MNEMONIC', u'DESCRIPTION', u'COLOR NAME',
               u'COLOR CODE']
    vals = [[1, 'TssA', 'Active TSS', 'Red', '255,0,0'],
            [2, 'TssFlnk', 'Flanking TSS', 'Orange Red', '255,69,0'],
            [3, 'TssFlnkU', 'Flanking TSS Upstream', 'Orange Red', '255,69,0'],
            [4, 'TssFlnkD', 'Flanking TSS Downstream', 'Orange Red', '255,69,0'],
            [5, 'Tx', 'Strong transcription', 'Green', '0,128,0'],
            [6, 'TxWk', 'Weak transcription', 'DarkGreen', '0,100,0'],
            [7, 'EnhG1', 'Genic enhancer1', 'GreenYellow', '194,225,5'],
            [8, 'EnhG2', 'Genic enhancer2', 'GreenYellow', '194,225,5'],
            [9, 'EnhA1', 'Active Enhancer 1', 'Orange', '255,195,77'],
            [10, 'EnhA2', 'Active Enhancer 2', 'Orange', '255,195,77'],
            [11, 'EnhWk', 'Weak Enhancer', 'Yellow', '255,255,0'],
            [12, 'ZNF/Rpts', 'ZNF genes & repeats', 'Medium Aquamarine',
             '102,205,170'],
            [13, 'Het', 'Heterochromatin', 'PaleTurquoise', '138,145,208'],
            [14, 'TssBiv', 'Bivalent/Poised TSS', 'IndianRed', '205,92,92'],
            [15, 'EnhBiv', 'Bivalent Enhancer', 'DarkKhaki', '189,183,107'],
            [16, 'ReprPC', 'Repressed PolyComb', 'Silver', '128,128,128'],
            # State 17's mnemonic is ReprPCWk in the Roadmap 18-state model;
            # it was previously a duplicate of state 16's 'ReprPC'.
            [17, 'ReprPCWk', 'Weak Repressed PolyComb', 'Gainsboro',
             '192,192,192'],
            [18, 'Quies', 'Quiescent/Low', 'White', '255,255,255']]
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(vals, columns=columns)
    df.to_csv(os.path.join(outdir, 'frazer_annotation.tsv'), index=None,
              sep='\t')
    # Now I'll make a more useful annotation file that has better column names
    # and a few extra columns.
    state_annot = pd.read_table(os.path.join(outdir, 'frazer_annotation.tsv'),
                                index_col=0)
    state_annot.index.name = 'state_number'
    state_annot.columns = [x.lower().replace(' ', '_') for x in
                           state_annot.columns]
    state_annot['name'] = (state_annot.index.astype(str).values + '_' +
                           state_annot.mnemonic)
    state_annot['color_code_norm'] = state_annot['color_code'].apply(
        lambda x: np.array([int(x) for x in x.split(',')]))
    # Normalize RGB values into [0, 1] (e.g. for matplotlib colors).
    state_annot['color_code_norm'] = state_annot['color_code_norm'].apply(
        lambda x: x / 255.)
    state_annot['color_code_norm'] = state_annot.color_code_norm.apply(
        lambda x: ','.join([str(y) for y in x]))
    state_annot.to_csv(os.path.join(outdir, 'frazer_parsed_annotation.tsv'),
                       sep='\t')
def download_roadmap_15_state_chromatin_model(outdir):
    """
    Download 15 state chromatin model from Roadmap Epigenomics. There is a bed
    file for each cell type as well as an annotation file with state information
    and file to convert the roadmap ID's to human readable cell types. Bed files
    are sorted so they work with bedtools -sorted option.

    Parameters
    ----------
    outdir : str
        Directory to save files to. Created if needed.
    """
    try:
        os.makedirs(outdir)
    except OSError:
        # Directory already exists.
        pass
    src = ('http://egg2.wustl.edu/roadmap/data/byFileType/chromhmmSegmentations'
           '/ChmmModels/coreMarks/jointModel/final/all.mnemonics.bedFiles.tgz')
    dest = os.path.join(outdir, os.path.split(src)[1])
    _download_and_untar(src, dest, outdir, remove_tarball=True)
    import glob
    fns = glob.glob(os.path.join(outdir, '*bed.gz'))
    for fn in fns:
        subprocess.check_call(['gunzip', fn])
        # gunzip drops the .gz suffix. Compute the resulting paths by slicing;
        # str.strip removes a *set of characters*, not a suffix.
        unzipped = fn[:-len('.gz')]
        sorted_fn = '{}_sorted.bed'.format(unzipped[:-len('.bed')])
        subprocess.check_call('sort -k 1,1 -k2,2n {} > {}'.format(
            unzipped, sorted_fn), shell=True)
        os.remove(unzipped)
    to_download = []
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/imputed12marks/'
                       'jointModel/final/EIDlegend.txt')
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/coreMarks/jointModel'
                       '/final/labelmap_15_coreMarks.tab')
    to_download.append('http://egg2.wustl.edu/roadmap/data/byFileType/'
                       'chromhmmSegmentations/ChmmModels/coreMarks/jointModel'
                       '/final/colormap_15_coreMarks.tab')
    for src in to_download:
        dest = os.path.join(outdir, os.path.split(src)[1])
        _download_file(src, dest)
    # The 15 state model doesn't have a nice annotation file like the 25 state
    # model (although the table is on the website), so I'm just putting the info
    # in here and I'll write the file myself.
    columns = [u'STATE NO.', u'MNEMONIC', u'DESCRIPTION', u'COLOR NAME',
               u'COLOR CODE']
    # One list per column (note the .T transpose when building the frame).
    vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            ['TssA', 'TssAFlnk', 'TxFlnk', 'Tx', 'TxWk', 'EnhG', 'Enh',
             'ZNF/Rpts', 'Het', 'TssBiv', 'BivFlnk', 'EnhBiv', 'ReprPC',
             'ReprPCWk', 'Quies'],
            ['Active TSS', 'Flanking Active TSS', "Transcr. at gene 5' and 3'",
             'Strong transcription', 'Weak transcription', 'Genic enhancers',
             'Enhancers', 'ZNF genes & repeats', 'Heterochromatin',
             'Bivalent/Poised TSS', 'Flanking Bivalent TSS/Enh',
             'Bivalent Enhancer', 'Repressed PolyComb',
             'Weak Repressed PolyComb', 'Quiescent/Low'],
            ['Red', 'Orange Red', 'LimeGreen', 'Green', 'DarkGreen',
             'GreenYellow', 'Yellow', 'Medium Aquamarine', 'PaleTurquoise',
             'IndianRed', 'DarkSalmon', 'DarkKhaki', 'Silver', 'Gainsboro',
             'White'],
            ['255,0,0', '255,69,0', '50,205,50', '0,128,0', '0,100,0',
             '194,225,5', '255,255,0', '102,205,170', '138,145,208',
             '205,92,92', '233,150,122', '189,183,107', '128,128,128',
             '192,192,192', '255,255,255']
            ]
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(vals, index=columns).T
    df.to_csv(os.path.join(outdir, 'frazer_annotation.tsv'), index=None,
              sep='\t')
    # Now I'll make a more useful annotation file that has better column names
    # and a few extra columns.
    state_annot = pd.read_table(os.path.join(outdir, 'frazer_annotation.tsv'),
                                index_col=0)
    state_annot.index.name = 'state_number'
    state_annot.columns = [x.lower().replace(' ', '_') for x in
                           state_annot.columns]
    state_annot['name'] = (state_annot.index.astype(str).values + '_' +
                           state_annot.mnemonic)
    state_annot['color_code_norm'] = state_annot['color_code'].apply(
        lambda x: np.array([int(x) for x in x.split(',')]))
    # Normalize RGB values into [0, 1] (e.g. for matplotlib colors).
    state_annot['color_code_norm'] = state_annot['color_code_norm'].apply(
        lambda x: x / 255.)
    state_annot['color_code_norm'] = state_annot.color_code_norm.apply(
        lambda x: ','.join([str(y) for y in x]))
    state_annot.to_csv(os.path.join(outdir, 'frazer_parsed_annotation.tsv'),
                       sep='\t')
|
frazer-lab/pipelines
|
cdpipelines/prepare.py
|
Python
|
mit
| 42,543
|
[
"Bioconductor",
"HTSeq"
] |
d0467b5a7384932cac76d42da7f77b853a76de3e1929e2ffb5c71a87fdec75ac
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import sys
from spinnaker.run import check_run_and_monitor
from spinnaker.run import check_run_quick
from spinnaker.run import run_and_monitor
from spinnaker.run import run_quick
def get_repository_dir(name):
  """Return the local directory that holds the named repository.

  Repositories are assumed to be checked out as siblings inside the build
  directory that refresh_source is run from. The one exception is spinnaker/
  itself, which lives one level above this script rather than in the build
  directory.

  Args:
    name [string]: The repository name.
  """
  if name != 'spinnaker':
    return name
  parent = os.path.join(os.path.dirname(__file__), '..')
  return os.path.abspath(parent)
class SourceRepository(
    collections.namedtuple('SourceRepository', 'name owner')):
  """A github repository identified by its short name and owning user.

  Attributes:
    name: The [short] name of the repository.
    owner: The github user name owning the repository
  """
  pass
class Refresher(object):
"""Provides branch management capabilities across Spinnaker repositories.
The Spinnaker repositories are federated across several independent
repositories. This class provides convenient support to update local
repositories from remote and vice-versa.
The origin repository is specified using --github_user option. This specifies
the github repository owner for the origin repositories. It is only relevant
when a repository needs to be cloned to establish a local repository. The
value 'upstream' can be used to indicate that the repository should be cloned
from its authoritative source as opposed to another user's fork.
When the refresher clones new repositories, it establishes an "upstream"
remote to the authoritative repository (based on hard-coded mappings)
unless the origin is the upstream. Upstream pulls are disabled (including
when the origin is the upstream) and only the master branch can be pulled
from upstream.
If --pull_branch is used then the local repositories will pull their current
branch from the origin repository. If a local repository does not yet exist,
then it will be cloned from the --github_user using the branch specified
by --pull_branch. The --pull_origin option is similar but implies that the
branch is 'master'. This is intended to perform complete updates of the
local repositories.
--push_branch (or --push_master, implying 'master' branch) will push the
local repository branch back to the origin, but only if the local repository
is in the specified branch. This is for safety to prevent accidental pushes.
It is assumed that multi-repository changes will have a common feature-branch
name, and not all repositories will be affected.
Of course, individual repositories can still be managed using explicit git
commands. This class is intended for cross-cutting management.
"""
__OPTIONAL_REPOSITORIES = [SourceRepository('citest', 'google')]
__REQUIRED_REPOSITORIES = [
SourceRepository('spinnaker', 'spinnaker'),
SourceRepository('clouddriver', 'spinnaker'),
SourceRepository('orca', 'spinnaker'),
SourceRepository('front50', 'spinnaker'),
SourceRepository('echo', 'spinnaker'),
SourceRepository('rosco', 'spinnaker'),
SourceRepository('gate', 'spinnaker'),
SourceRepository('fiat', 'spinnaker'),
SourceRepository('igor', 'spinnaker'),
SourceRepository('deck', 'spinnaker')]
@property
def pull_branch(self):
"""Gets the branch that we want to pull.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction to be consistent
with the push_branch property.
"""
if self.__options.pull_origin:
if (self.__options.pull_branch
and self.__options.pull_branch != 'master'):
raise ValueError(
'--pull_origin is incompatible with --pull_branch={branch}'
.format(branch=self.__options.pull_branch))
return 'master'
return self.__options.pull_branch
@property
def push_branch(self):
"""Gets the branch that we want to push.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction because the
option to push is not necessarily present depending on the use case.
"""
if self.__options.push_master:
if (self.__options.push_branch
and self.__options.push_branch != 'master'):
raise ValueError(
'--push_origin is incompatible with --push_branch={branch}'
.format(branch=self.__options.push_branch))
return 'master'
return self.__options.push_branch
def __init__(self, options):
self.__options = options
self.__extra_repositories = self.__OPTIONAL_REPOSITORIES
if options.extra_repos:
for extra in options.extra_repos.split(','):
pair = extra.split('=')
if len(pair) != 2:
raise ValueError(
'Invalid --extra_repos value "{extra}"'.format(extra=extra))
self.__extra_repositories.append(SourceRepository(pair[0], pair[1]))
def get_remote_repository_url(self, path, which='origin'):
"""Determine the repository that a given path is from.
Args:
path [string]: The path to the repository
which [string]: The remote repository name (origin or upstream).
Returns:
The origin url for path, or None if not a git repository.
"""
result = run_quick('git -C {path} config --get remote.{which}.url'
.format(path=path, which=which),
echo=False)
if result.returncode:
return None
return result.stdout.strip()
def get_local_branch_name(self, name):
"""Determine which git branch a local repository is in.
Args:
name [string]: The repository name.
Returns:
The name of the branch.
"""
result = run_quick('git -C "{dir}" rev-parse --abbrev-ref HEAD'
.format(dir=get_repository_dir(name)),
echo=False)
if result.returncode:
error = 'Could not determine branch: ' + result.stdout.strip()
raise RuntimeError(error)
return result.stdout.strip()
def get_github_repository_url(self, repository, owner=None):
"""Determine the URL for a given github repository.
Args:
repository [string]: The upstream SourceRepository.
owner [string]: The explicit owner for the repository we want.
If not provided then use the github_user in the bound options.
"""
user = owner or self.__options.github_user
if not user:
raise ValueError('No --github_user specified.')
if user == 'default' or user == 'upstream':
user = repository.owner
url_pattern = ('https://github.com/{user}/{name}.git'
if self.__options.use_https
else 'git@github.com:{user}/{name}.git')
return url_pattern.format(user=user, name=repository.name)
def git_clone(self, repository, owner=None):
"""Clone the specified repository
Args:
repository [string]: The name of the github repository (without owner).
owner [string]: An explicit repository owner.
If not provided use the configured options.
"""
name = repository.name
repository_dir = get_repository_dir(name)
upstream_user = repository.owner
branch = self.pull_branch or 'master'
origin_url = self.get_github_repository_url(repository, owner=owner)
upstream_url = 'https://github.com/{upstream_user}/{name}.git'.format(
upstream_user=upstream_user, name=name)
# Don't echo because we're going to hide some failure.
print 'Cloning {name} from {origin_url} -b {branch}.'.format(
name=name, origin_url=origin_url, branch=branch)
shell_result = run_and_monitor(
'git clone {url} -b {branch}'.format(url=origin_url, branch=branch),
echo=False)
if not shell_result.returncode:
if shell_result.stdout:
print shell_result.stdout
else:
if repository in self.__extra_repositories:
sys.stderr.write('WARNING: Missing optional repository {name}.\n'
.format(name=name))
sys.stderr.write(' Continue on without it.\n')
return
sys.stderr.write(shell_result.stderr or shell_result.stdout)
sys.stderr.write(
'FATAL: Cannot continue without required repository {name}.\n'
' Consider using github to fork one from {upstream}.\n'.
format(name=name, upstream=upstream_url))
raise SystemExit('Repository {url} not found.'.format(url=origin_url))
if self.__options.add_upstream and origin_url != upstream_url:
print ' Adding upstream repository {upstream}.'.format(
upstream=upstream_url)
check_run_quick('git -C "{dir}" remote add upstream {url}'
.format(dir=repository_dir, url=upstream_url),
echo=False)
if self.__options.disable_upstream_push:
which = 'upstream' if origin_url != upstream_url else 'origin'
print ' Disabling git pushes to {which} {upstream}'.format(
which=which, upstream=upstream_url)
check_run_quick(
'git -C "{dir}" remote set-url --push {which} disabled'
.format(dir=repository_dir, which=which),
echo=False)
def pull_from_origin(self, repository):
"""Pulls the current branch from the git origin.
Args:
repository [string]: The local repository to update.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
self.git_clone(repository)
return
print 'Updating {name} from origin'.format(name=name)
branch = self.get_local_branch_name(name)
if branch != self.pull_branch:
if self.__options.force_pull:
sys.stderr.write(
'WARNING: Updating {name} branch={branch}, *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
else:
sys.stderr.write(
'WARNING: Skipping {name} because branch={branch},'
' *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
return
try:
check_run_and_monitor('git -C "{dir}" pull origin {branch} --tags'
.format(dir=repository_dir, branch=branch),
echo=True)
except RuntimeError:
result = check_run_and_monitor('git -C "{dir}" branch -r'
.format(dir=repository_dir),
echo=False)
if result.stdout.find('origin/{branch}\n') >= 0:
raise
sys.stderr.write(
'WARNING {name} branch={branch} is not known to the origin.\n'
.format(name=name, branch=branch))
def pull_from_upstream_if_master(self, repository):
"""Pulls the master branch from the upstream repository.
This will only have effect if the local repository exists
and is currently in the master branch.
Args:
repository [string]: The name of the local repository to update.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
self.pull_from_origin(repository)
branch = self.get_local_branch_name(name)
if branch != 'master':
sys.stderr.write('Skipping {name} because it is in branch={branch}.\n'
.format(name=name, branch=branch))
return
print 'Pulling master {name} from upstream'.format(name=name)
check_run_and_monitor('git -C "{dir}" pull upstream master --tags'
.format(dir=repository_dir),
echo=True)
def push_to_origin_if_target_branch(self, repository):
"""Pushes the current target branch of the local repository to the origin.
This will only have effect if the local repository exists
and is currently in the target branch.
Args:
repository [string]: The name of the local repository to push from.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
sys.stderr.write('Skipping {name} because it does not yet exist.\n'
.format(name=name))
return
branch = self.get_local_branch_name(name)
if branch != self.push_branch:
sys.stderr.write(
'Skipping {name} because it is in branch={branch}, not {want}.\n'
.format(name=name, branch=branch, want=self.push_branch))
return
print 'Pushing {name} to origin.'.format(name=name)
check_run_and_monitor('git -C "{dir}" push origin {branch} --tags'.format(
dir=repository_dir, branch=self.push_branch),
echo=True)
def push_all_to_origin_if_target_branch(self):
"""Push all the local repositories current target branch to origin.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.push_to_origin_if_target_branch(repository)
def pull_all_from_upstream_if_master(self):
"""Pull all the upstream master branches into their local repository.
This will skip any local repositories that are not currently in the master
branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.pull_from_upstream_if_master(repository)
def pull_all_from_origin(self):
"""Pull all the origin target branches into their local repository.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
try:
self.pull_from_origin(repository)
except RuntimeError as ex:
if repository in self.__extra_repositories and not os.path.exists(
get_repository_dir(repository)):
sys.stderr.write(
'IGNORING error "{msg}" in optional repository {name}'
' because the local repository does not yet exist.\n'
.format(msg=ex.message, name=repository.name))
else:
raise
def __determine_spring_config_location(self):
root = '{dir}/config'.format(
dir=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
home = os.path.join(os.environ['HOME'] + '/.spinnaker')
return '{root}/,{home}/'.format(home=home, root=root)
def write_gradle_run_script(self, repository):
"""Generate a dev_run.sh script for the local repository.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
DEF_SYS_PROPERTIES="-Dspring.config.location='{spring_location}'"
bash -c "(./gradlew $DEF_SYS_PROPERTIES $@ > '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name,
spring_location=self.__determine_spring_config_location()))
os.chmod(path, 0777)
def write_deck_run_script(self, repository):
"""Generate a dev_run.sh script for running deck locally.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
if [[ node_modules -ot .git ]]; then
# Update npm, otherwise assume nothing changed and we're good.
npm install >& "$LOG_DIR/deck.log"
else
echo "deck npm node_modules looks up to date already."
fi
# Append to the log file we just started.
bash -c "(npm start >> '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name))
os.chmod(path, 0777)
def update_spinnaker_run_scripts(self):
"""Regenerate the local dev_run.sh script for each local repository."""
for repository in self.__REQUIRED_REPOSITORIES:
name = repository.name
if not os.path.exists(name):
continue
if name == 'deck':
self.write_deck_run_script(repository)
else:
self.write_gradle_run_script(repository)
@classmethod
def init_extra_argument_parser(cls, parser):
"""Initialize additional arguments for managing remote repositories.
This is to sync the origin and upstream repositories. The intent
is to ultimately sync the origin from the upstream repository, but
this might be in two steps so the upstream can be verified [again]
before pushing the changes to the origin.
"""
# Note that we only pull the master branch from upstream.
# Pulling other branches don't normally make sense.
parser.add_argument('--pull_upstream', default=False,
action='store_true',
help='If the local branch is master, then refresh it'
' from the upstream repository.'
' Otherwise leave as is.')
parser.add_argument('--nopull_upstream',
dest='pull_upstream',
action='store_false')
# Note we only push target branches to origin specified by --push_branch
# To push another branch, you must explicitly push it with git
# (or another invocation).
parser.add_argument('--push_master', action='store_true',
help='Push the current branch to origin if it is'
' master. This is the same as --push_branch=master.')
parser.add_argument('--nopush_master', dest='push_master',
action='store_false')
parser.add_argument('--push_branch', default='',
help='If specified and the local repository is in'
' this branch then push it to the origin'
' repository. Otherwise do not push it.')
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--use_https', default=True, action='store_true',
help='Use https when cloning github repositories.')
parser.add_argument('--use_ssh', dest='use_https', action='store_false',
help='Use SSH when cloning github repositories.')
parser.add_argument('--add_upstream', default=True,
action='store_true',
help='Add upstream repository when cloning.')
parser.add_argument('--noadd_upstream', dest='add_upstream',
action='store_false')
parser.add_argument('--disable_upstream_push', default=True,
action='store_true',
help='Disable future pushes to the upstream'
' repository when cloning a repository.')
parser.add_argument('--nodisable_upstream_push',
dest='disable_upstream_push',
action='store_false')
parser.add_argument('--pull_origin', default=False,
action='store_true',
help='Refresh the local branch from the origin.'
' If cloning, then clone the master branch.'
' See --pull_branch for a more general option.')
parser.add_argument('--nopull_origin', dest='pull_origin',
action='store_false')
parser.add_argument('--pull_branch', default='',
help='Refresh the local branch from the origin if'
' it is in the specified branch,'
' otherwise skip it.'
' If cloning, then clone this branch.')
parser.add_argument('--force_pull', default=False,
help='Force pulls, even if the current branch'
' differs from the pulled branch.')
parser.add_argument(
'--extra_repos', default=None,
help='A comma-delimited list of name=owner optional repositories.'
'name is the repository name,'
' owner is the authoritative github user name owning it.'
' The --github_user will still be used to determine the origin.')
parser.add_argument('--github_user', default=None,
help='Pull from this github user\'s repositories.'
' If the user is "default" then use the'
' authoritative (upstream) repository.')
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
cls.init_extra_argument_parser(parser)
options = parser.parse_args()
refresher = cls(options)
in_repository_url = refresher.get_remote_repository_url('.')
if in_repository_url:
sys.stderr.write(
'ERROR: You cannot run this script from within a local repository.\n'
' This directory is from "{url}".\n'
' Did you intend to be in the parent directory?\n'
.format(url=in_repository_url))
return -1
try:
# This is ok. Really we want to look for an exception validating these
# properties so we can fail with a friendly error rather than stack.
if (refresher.pull_branch != refresher.push_branch
and refresher.pull_branch and refresher.push_branch):
sys.stderr.write(
'WARNING: pulling branch {pull} and pushing branch {push}'
.format(pull=refresher.pull_branch,
push=refresher.push_branch))
except Exception as ex:
sys.stderr.write('FAILURE: {0}\n'.format(ex.message))
return -1
nothing = True
if options.pull_upstream:
nothing = False
refresher.pull_all_from_upstream_if_master()
if refresher.push_branch:
nothing = False
refresher.push_all_to_origin_if_target_branch()
if refresher.pull_branch:
nothing = False
refresher.pull_all_from_origin()
refresher.update_spinnaker_run_scripts()
if nothing:
sys.stderr.write('No pull/push options were specified.\n')
else:
print 'DONE'
return 0
# Script entry point: delegate to Refresher.main() and propagate its exit code.
if __name__ == '__main__':
    sys.exit(Refresher.main())
|
stitchfix/spinnaker
|
dev/refresh_source.py
|
Python
|
apache-2.0
| 24,119
|
[
"ORCA"
] |
4bd5d42fdb3cdb5cfaeb0039aadf86068a9ec6a2aceeca6f8bf10a2eb4a6c3f2
|
'''
wrapper for lists of galaxy objects, each method returns lists, unless they
are setting attributes.
'''
import numpy as np
import itertools
from .starpop import StarPop
__all__ = ['Galaxies']
class Galaxies(StarPop):
    '''
    wrapper for lists of galaxy objects, each method returns lists, unless they
    are setting attributes.
    '''
    def __init__(self, galaxy_objects):
        # An ndarray so fancy indexing works in squish(); also record the
        # unique filters present across the collection.
        self.galaxies = np.asarray(galaxy_objects)
        self.filter1s = np.unique([g.filter1 for g in galaxy_objects])
        self.filter2s = np.unique([g.filter2 for g in galaxy_objects])

    def sum_attr(self, *attrs):
        '''Attach ``sum_<attr>`` (the column sum of ``attr``) to every galaxy.'''
        # Builtin setattr instead of calling g.__setattr__ directly.
        for attr, gal in itertools.product(attrs, self.galaxies):
            setattr(gal, 'sum_%s' % attr, np.sum(gal.data.get_col(attr)))

    def all_stages(self, *stages):
        '''
        adds the indices of any stage as attributes to galaxy.
        If the stage isn't found, -1 is returned.
        '''
        # Plain loop: this is executed purely for its side effects.
        for gal in self.galaxies:
            gal.all_stages(*stages)

    def squish(self, *attrs, **kwargs):
        '''
        concatenates an attribute or many attributes and adds them to galaxies
        instance -- with an 's' at the end to pluralize them... that might
        be stupid.
        ex
        for gal in gals:
            gal.ra = gal.data['ra']
            gal.dec = gal.data['dec']
        gs = Galaxies.galaxies(gals)
        gs.squish('color', 'mag2', 'ra', 'dec')
        gs.ras ...

        kwargs:
        inds -- indices of self.galaxies to include (default: all)
        new_attrs -- names for the new attributes (default: attr + 's')
        '''
        inds = kwargs.get('inds', np.arange(len(self.galaxies)))
        new_attrs = kwargs.get('new_attrs', None)
        if new_attrs is not None:
            assert len(new_attrs) == len(attrs), \
                'new attribute titles must be list same length as given attributes.'
        for i, attr in enumerate(attrs):
            # do we have a name for the new attribute?
            if new_attrs is not None:
                new_attr = new_attrs[i]
            else:
                new_attr = '%ss' % attr
            # Builtin getattr instead of g.__getattribute__.
            new_list = [getattr(g, attr) for g in self.galaxies[inds]]
            # is attr an array of arrays, or is it now an array?
            try:
                new_val = np.concatenate(new_list)
            except ValueError:
                new_val = np.array(new_list)
            setattr(self, new_attr, new_val)

    def finite_key(self, key):
        '''Galaxies whose ``key`` attribute is finite.'''
        return [g for g in self.galaxies if np.isfinite(g.__dict__[key])]

    def select_on_key(self, key, val):
        ''' ex filter2 == F814W works great with strings or exact g.key==val.
        rounds z to four places, no error handling.
        '''
        key = key.lower()
        if key == 'z':
            return [g for g in self.galaxies
                    if np.round(g.__dict__[key], 4) == val]
        return [g for g in self.galaxies if g.__dict__[key] == val]

    def group_by_z(self):
        '''Map 'Z%.4f' keys to galaxy lists; 'no z' collects NaN-z galaxies.

        Returns None when no ``zs`` attribute exists (e.g. squish('z') was
        never called).
        '''
        if not hasattr(self, 'zs'):
            return
        zsf = self.zs[np.isfinite(self.zs)]
        d = {}
        for z in zsf:
            d['Z%.4f' % z] = self.select_on_key('z', z)
        d['no z'] = [g for g in self.galaxies if np.isnan(g.z)]
        return d

    def intersection(self, **kwargs):
        '''
        ex kwargs = {'filter2':'F814W', 'filter1':'F555W'}
        will return a list of galaxy objects that match all kwarg values.
        '''
        gs_tmp = self.galaxies
        # Iterate the per-key match lists directly instead of range(len()).
        for matches in (self.select_on_key(k, v) for k, v in kwargs.items()):
            gs_tmp = list(set(gs_tmp) & set(matches))
        return gs_tmp
|
philrosenfield/ResolvedStellarPops
|
galaxies/galaxies.py
|
Python
|
bsd-3-clause
| 3,569
|
[
"Galaxy"
] |
50d3ef922519800cd1ca20860cb83b0748e068b8c14828712b9681e1f1c07332
|
"""A kernel manager relating notebooks and kernels
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os

from tornado import web

from IPython.kernel.multikernelmanager import MultiKernelManager
from IPython.utils.traitlets import (
    Dict, List, Unicode, TraitError,
)

from IPython.html.utils import to_os_path
from IPython.utils.py3compat import getcwd
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class MappingKernelManager(MultiKernelManager):
    """A KernelManager that handles notebook mapping and HTTP error handling"""

    def _kernel_manager_class_default(self):
        # Use the IOLoop-aware kernel manager so kernel lifecycle events
        # integrate with the tornado event loop running the server.
        return "IPython.kernel.ioloop.IOLoopKernelManager"

    # Extra command-line arguments forwarded to each kernel on launch.
    kernel_argv = List(Unicode)

    # Base directory against which API paths are resolved to OS paths.
    root_dir = Unicode(getcwd(), config=True)

    def _root_dir_changed(self, name, old, new):
        """Do a bit of validation of the root dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            self.root_dir = os.path.abspath(new)
            return
        if not os.path.exists(new) or not os.path.isdir(new):
            # TraitError must be imported from IPython.utils.traitlets;
            # without it this line raised NameError instead of TraitError.
            raise TraitError("kernel root dir %r is not a directory" % new)

    #-------------------------------------------------------------------------
    # Methods for managing kernels and sessions
    #-------------------------------------------------------------------------

    def _handle_kernel_died(self, kernel_id):
        """notice that a kernel died"""
        # Lazy %-style args: formatted only if the record is emitted.
        self.log.warn("Kernel %s died, removing from map.", kernel_id)
        self.remove_kernel(kernel_id)

    def cwd_for_path(self, path):
        """Turn API path into absolute OS path."""
        os_path = to_os_path(path, self.root_dir)
        # in the case of notebooks and kernels not being on the same filesystem,
        # walk up to root_dir if the paths don't exist
        while not os.path.exists(os_path) and os_path != self.root_dir:
            os_path = os.path.dirname(os_path)
        return os_path

    def start_kernel(self, kernel_id=None, path=None, **kwargs):
        """Start a kernel for a session an return its kernel_id.

        Parameters
        ----------
        kernel_id : uuid
            The uuid to associate the new kernel with. If this
            is not None, this kernel will be persistent whenever it is
            requested.
        path : API path
            The API path (unicode, '/' delimited) for the cwd.
            Will be transformed to an OS path relative to root_dir.
        """
        if kernel_id is None:
            kwargs['extra_arguments'] = self.kernel_argv
            if path is not None:
                kwargs['cwd'] = self.cwd_for_path(path)
            kernel_id = super(MappingKernelManager, self).start_kernel(**kwargs)
            # Lazy %-args instead of eager string interpolation.
            self.log.info("Kernel started: %s", kernel_id)
            self.log.debug("Kernel args: %r", kwargs)
            # register callback for failed auto-restart
            self.add_restart_callback(kernel_id,
                lambda : self._handle_kernel_died(kernel_id),
                'dead',
            )
        else:
            self._check_kernel_id(kernel_id)
            self.log.info("Using existing kernel: %s", kernel_id)
        return kernel_id

    def shutdown_kernel(self, kernel_id, now=False):
        """Shutdown a kernel by kernel_id"""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).shutdown_kernel(kernel_id, now=now)

    def kernel_model(self, kernel_id):
        """Return a dictionary of kernel information described in the
        JSON standard model."""
        self._check_kernel_id(kernel_id)
        model = {"id": kernel_id}
        return model

    def list_kernels(self):
        """Returns a list of kernel_id's of kernels running."""
        # One JSON-model dict per running kernel.
        return [self.kernel_model(kid)
                for kid in super(MappingKernelManager, self).list_kernel_ids()]

    # override _check_kernel_id to raise 404 instead of KeyError
    def _check_kernel_id(self, kernel_id):
        """Check a that a kernel_id exists and raise 404 if not."""
        if kernel_id not in self:
            raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id)
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/html/services/kernels/kernelmanager.py
|
Python
|
bsd-3-clause
| 4,895
|
[
"Brian"
] |
8ef2fd63c9f2cb450ceaf5ce51ae0350b9e84d2a80416d224649dce64aa22271
|
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import time
import ipaddr
# Compatibility function to cast str to bytes objects.
# Where the runtime has a real ``bytes`` type (python3+) we encode via
# charmap; otherwise (python2) str already is the byte type, so _cb is
# simply the identity.
if ipaddr._compat_has_real_bytes:
    _cb = lambda bytestr: bytes(bytestr, 'charmap')
else:
    _cb = str
class IpaddrUnitTest(unittest.TestCase):
    def setUp(self):
        # Fixtures shared by most tests: a /24 IPv4 network, an IPv4 network
        # given via a hostmask, and a /64 IPv6 network.
        self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
        self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
        self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
def tearDown(self):
del(self.ipv4)
del(self.ipv4_hostmask)
del(self.ipv6)
del(self)
def testRepr(self):
self.assertEqual("IPv4Network('1.2.3.4/32')",
repr(ipaddr.IPv4Network('1.2.3.4')))
self.assertEqual("IPv6Network('::1/128')",
repr(ipaddr.IPv6Network('::1')))
def testAutoMasking(self):
addr1 = ipaddr.IPv4Network('1.1.1.255/24')
addr1_masked = ipaddr.IPv4Network('1.1.1.0/24')
self.assertEqual(addr1_masked, addr1.masked())
addr2 = ipaddr.IPv6Network('2000:cafe::efac:100/96')
addr2_masked = ipaddr.IPv6Network('2000:cafe::/96')
self.assertEqual(addr2_masked, addr2.masked())
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') + 255,
ipaddr.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') - 256,
ipaddr.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddr.IPv6Address('::1') + (2**16 - 2),
ipaddr.IPv6Address('::ffff'))
self.assertEqual(ipaddr.IPv6Address('::ffff') - (2**16 - 2),
ipaddr.IPv6Address('::1'))
def testInvalidStrings(self):
self.assertRaises(ValueError, ipaddr.IPNetwork, '')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'www.google.com')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3.4.5')
self.assertRaises(ValueError, ipaddr.IPNetwork, '301.2.2.2')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':2:3:4:5:6:7:8')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:9')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1::3:4:5:6::8')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '::a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1ffff::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '0xa::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:1a.2.3.4')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:1.2.3.4:8')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'::1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'cafe:cafe::/128/190')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'2001:db8:::1')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'2001:888888::1')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Address(1)._ip_int_from_string,
'1.a.2.3')
self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3'))
def testGetNetwork(self):
self.assertEqual(int(self.ipv4.network), 16909056)
self.assertEqual(str(self.ipv4.network), '1.2.3.0')
self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
self.assertEqual(int(self.ipv6.network),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6.network),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6.hostmask),
'::ffff:ffff:ffff:ffff')
def testBadVersionComparison(self):
# These should always raise TypeError
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1')
v6addr = ipaddr.IPAddress('::1')
v6net = ipaddr.IPAddress('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
def testMixedTypeComparison(self):
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1/32')
v6addr = ipaddr.IPAddress('::1')
v6net = ipaddr.IPNetwork('::1/128')
self.assertRaises(TypeError, lambda: v4addr < v4net)
self.assertRaises(TypeError, lambda: v4addr > v4net)
self.assertRaises(TypeError, lambda: v4net < v4addr)
self.assertRaises(TypeError, lambda: v4net > v4addr)
self.assertRaises(TypeError, lambda: v6addr < v6net)
self.assertRaises(TypeError, lambda: v6addr > v6net)
self.assertRaises(TypeError, lambda: v6net < v6addr)
self.assertRaises(TypeError, lambda: v6net > v6addr)
# with get_mixed_type_key, you can sort addresses and network.
self.assertEqual([v4addr, v4net], sorted([v4net, v4addr],
key=ipaddr.get_mixed_type_key))
self.assertEqual([v6addr, v6net], sorted([v6net, v6addr],
key=ipaddr.get_mixed_type_key))
def testIpFromInt(self):
self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, 2**32)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, -1)
ipv4 = ipaddr.IPNetwork('1.2.3.4')
ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, 2**128)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, -1)
self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
    # Packed (binary) construction needs a real bytes type; on python2 the
    # _cb helper degrades to plain str and this test is skipped entirely.
    if ipaddr._compat_has_real_bytes:  # on python3+
        def testIpFromPacked(self):
            """Addresses can be built from their packed byte representation."""
            ip = ipaddr.IP
            self.assertEqual(self.ipv4.ip,
                             ip(_cb('\x01\x02\x03\x04')).ip)
            self.assertEqual(ip('255.254.253.252'),
                             ip(_cb('\xff\xfe\xfd\xfc')))
            # Wrong byte counts are rejected (IPv4 needs 4, IPv6 needs 16).
            self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 3))
            self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 5))
            self.assertEqual(self.ipv6.ip,
                             ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
                                    '\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
            self.assertEqual(ip('ffff:2:3:4:ffff::'),
                             ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
                                    '\xff\xff' + '\x00' * 6)))
            self.assertEqual(ip('::'),
                             ip(_cb('\x00' * 16)))
            self.assertRaises(ValueError, ip, _cb('\x00' * 15))
            self.assertRaises(ValueError, ip, _cb('\x00' * 17))
def testGetIp(self):
self.assertEqual(int(self.ipv4.ip), 16909060)
self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
self.assertEqual(int(self.ipv6.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4.netmask), 4294967040L)
self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
self.assertEqual(int(self.ipv6.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
self.assert_(ipv4_zero_netmask._is_valid_netmask(str(0)))
ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
self.assert_(ipv6_zero_netmask._is_valid_netmask(str(0)))
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4.broadcast), 16909311L)
self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
self.assertEqual(int(self.ipv6.broadcast),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6.broadcast),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4.prefixlen, 24)
self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
ipaddr.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6.supernet().network),
'2001:658:22a:cafe::')
self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
ipaddr.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6.supernet(3).network),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
self.ipv4.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
self.ipv6.supernet(new_prefix=62))
def testIterSubnets(self):
self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets()))
self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
sorted(self.ipv4.subnet(new_prefix=27)))
self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
self.assertRaises(ValueError, self.ipv4.subnet,
prefixlen_diff=3, new_prefix=27)
self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
sorted(self.ipv6.subnet(new_prefix=68)))
self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
self.assertRaises(ValueError, self.ipv6.subnet,
prefixlen_diff=4, new_prefix=68)
def testGetSubnet(self):
self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddr.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddr.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4.subnet(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, 9)
self.assertRaises(ValueError, self.ipv6.subnet, 65)
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.supernet, 25)
self.assertRaises(ValueError, self.ipv6.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, -1)
self.assertRaises(ValueError, self.ipv6.subnet, -1)
def testGetNumHosts(self):
self.assertEqual(self.ipv4.numhosts, 256)
self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
self.assertEqual(self.ipv4.supernet().numhosts, 512)
self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
def testContains(self):
self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
self.assertTrue(self.ipv4 in self.ipv4)
self.assertTrue(self.ipv6 in self.ipv6)
# We can test addresses and string as well.
addr1 = ipaddr.IPv4Address('1.2.3.37')
self.assertTrue(addr1 in self.ipv4)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddr.IPv4Network('1.1.0.0/16').__contains__(
ipaddr.IPv4Network('1.0.0.0/15')))
def testBadAddress(self):
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'poop')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.256')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'poopv6')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.4/32/24')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '10/8')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, '10/8')
def testBadNetMask(self):
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/33')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.1.1.1/240.255.0.0')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/129')
def testNth(self):
self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
self.assertEqual(str(self.ipv6[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEquals(self):
self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertFalse(self.ipv4 == '')
self.assertFalse(self.ipv4 == [])
self.assertFalse(self.ipv4 == 2)
self.assertTrue(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv6 == '')
self.assertFalse(self.ipv6 == [])
self.assertFalse(self.ipv6 == 2)
def testNotEquals(self):
addr1 = ipaddr.IPAddress('1.2.3.4')
self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == addr1)
self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertTrue(self.ipv4 != '')
self.assertTrue(self.ipv4 != [])
self.assertTrue(self.ipv4 != 2)
addr2 = ipaddr.IPAddress('2001:658:22a:cafe:200::1')
self.assertFalse(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6 == addr2)
self.assertTrue(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv6 != '')
self.assertTrue(self.ipv6 != [])
self.assertTrue(self.ipv6 != 2)
def testSlash32Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
'1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEquals(str(ipaddr.IPv6Network('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
    def testCollapsing(self):
        """collapse_address_list merges duplicates and adjacent networks."""
        # test only IP addresses including some duplicates
        ip1 = ipaddr.IPv4Address('1.1.1.0')
        ip2 = ipaddr.IPv4Address('1.1.1.1')
        ip3 = ipaddr.IPv4Address('1.1.1.2')
        ip4 = ipaddr.IPv4Address('1.1.1.3')
        ip5 = ipaddr.IPv4Address('1.1.1.4')
        ip6 = ipaddr.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
                                     ipaddr.IPv4Network('1.1.1.4/32')])
        # test a mix of IP addresses and networks including some duplicates
        ip1 = ipaddr.IPv4Address('1.1.1.0')
        ip2 = ipaddr.IPv4Address('1.1.1.1')
        ip3 = ipaddr.IPv4Address('1.1.1.2')
        ip4 = ipaddr.IPv4Address('1.1.1.3')
        ip5 = ipaddr.IPv4Network('1.1.1.4/30')
        ip6 = ipaddr.IPv4Network('1.1.1.4/30')
        # check that addresses are subsumed properly.
        collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
        # test only IP networks
        ip1 = ipaddr.IPv4Network('1.1.0.0/24')
        ip2 = ipaddr.IPv4Network('1.1.1.0/24')
        ip3 = ipaddr.IPv4Network('1.1.2.0/24')
        ip4 = ipaddr.IPv4Network('1.1.3.0/24')
        ip5 = ipaddr.IPv4Network('1.1.4.0/24')
        # stored in no particular order b/c we want CollapseAddr to call [].sort
        ip6 = ipaddr.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
                                     ipaddr.IPv4Network('1.1.4.0/24')])
        # test that two addresses are supernet'ed properly
        collapsed = ipaddr.collapse_address_list([ip1, ip2])
        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
        # test same IP networks
        ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
        self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
                         [ip_same1])
        # test same IP addresses
        ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
        self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
                         [ipaddr.IPNetwork('1.1.1.1/32')])
        ip1 = ipaddr.IPv6Network('::2001:1/100')
        ip2 = ipaddr.IPv6Network('::2002:1/120')
        ip3 = ipaddr.IPv6Network('::2001:1/96')
        # test that ipv6 addresses are subsumed properly.
        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
        self.assertEqual(collapsed, [ip3])
        # the toejam test: mixing versions must raise TypeError
        ip1 = ipaddr.IPAddress('1.1.1.1')
        ip2 = ipaddr.IPAddress('::1')
        self.assertRaises(TypeError, ipaddr.collapse_address_list,
                          [ip1, ip2])
    def testSummarizing(self):
        """summarize_address_range compresses an inclusive address range."""
        #ip = ipaddr.IPAddress
        #ipnet = ipaddr.IPNetwork
        summarize = ipaddr.summarize_address_range
        ip1 = ipaddr.IPAddress('1.1.1.0')
        ip2 = ipaddr.IPAddress('1.1.1.255')
        # test a /24 is summarized properly
        self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
        # test an IPv4 range that isn't on a network byte boundary
        ip2 = ipaddr.IPAddress('1.1.1.8')
        self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
                                               ipaddr.IPNetwork('1.1.1.8')])
        ip1 = ipaddr.IPAddress('1::')
        ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test a IPv6 is summarized properly
        self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
        # test an IPv6 range that isn't on a network byte boundary
        ip2 = ipaddr.IPAddress('2::')
        self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
                                               ipaddr.IPNetwork('2::/128')])
        # test exception raised when first is greater than last
        self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
                          ipaddr.IPAddress('1.1.0.0'))
        # test exception raised when first and last aren't IP addresses
        self.assertRaises(TypeError, summarize,
                          ipaddr.IPNetwork('1.1.1.0'),
                          ipaddr.IPNetwork('1.1.0.0'))
        self.assertRaises(TypeError, summarize,
                          ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0'))
        # test exception raised when first and last are not same version
        self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'),
                          ipaddr.IPNetwork('1.1.0.0'))
def testAddressComparison(self):
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.2'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddr.IPv4Network('1.1.1.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.1/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddr.IPv6Network('2001::2000/96')
ip2 = ipaddr.IPv6Network('2001::2001/96')
ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
ipv6 = ipaddr.IPv6Network('::/0')
ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddr.IPNetwork('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddr.IPNetwork('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddr.IPNetwork('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# <=, >=
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.2'))
self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
def testStrictNetworks(self):
    """Constructing a network with host bits set must fail when strict=True."""
    for bad in ('192.168.1.1/24', '::1/120'):
        self.assertRaises(ValueError, ipaddr.IPNetwork, bad, strict=True)
def testOverlaps(self):
    """overlaps() is true exactly when two networks share any address."""
    net_inside = ipaddr.IPv4Network('1.2.3.0/30')    # within self.ipv4
    net_adjacent = ipaddr.IPv4Network('1.2.2.0/24')  # disjoint from self.ipv4
    net_subnet = ipaddr.IPv4Network('1.2.2.64/26')   # contained in net_adjacent
    self.assertTrue(self.ipv4.overlaps(net_inside))
    self.assertFalse(self.ipv4.overlaps(net_adjacent))
    self.assertTrue(net_adjacent.overlaps(net_subnet))
def testEmbeddedIpv4(self):
    """IPv4 addresses embedded in IPv6 notation.

    Uses assertEqual/assertNotEqual; the assertEquals/assertNotEquals
    spellings are deprecated aliases in unittest.
    """
    ipv4_string = '192.168.0.1'
    ipv4 = ipaddr.IPv4Network(ipv4_string)
    # IPv4-compatible form (::a.b.c.d) carries the same low 32 bits.
    v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
    self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
    # IPv4-mapped form (::ffff:a.b.c.d) differs because of the ffff word.
    v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
    self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
    # Dotted-quad notation is only legal in the final 32 bits.
    self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
                      '2001:1.1.1.1:1.1.1.1')
def testIPVersion(self):
    """The version attribute reports the address family (4 or 6)."""
    self.assertEqual(4, self.ipv4.version)
    self.assertEqual(6, self.ipv6.version)
def testPacked(self):
    """packed returns the big-endian binary representation of the address."""
    expectations = [
        (self.ipv4, '\x01\x02\x03\x04'),
        (ipaddr.IPv4Network('255.254.253.252'), '\xff\xfe\xfd\xfc'),
        (self.ipv6,
         '\x20\x01\x06\x58\x02\x2a\xca\xfe'
         '\x02\x00\x00\x00\x00\x00\x00\x01'),
        (ipaddr.IPv6Network('ffff:2:3:4:ffff::'),
         '\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff' + '\x00' * 6),
        (ipaddr.IPv6Network('::1:0:0:0:0'),
         '\x00' * 6 + '\x00\x01' + '\x00' * 8),
    ]
    for net, raw in expectations:
        self.assertEqual(net.packed, _cb(raw))
def testIpStrFromPrefixlen(self):
    """_ip_string_from_prefix converts a prefix length to a dotted netmask.

    Uses assertEqual; assertEquals is a deprecated unittest alias.
    """
    ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
    # With no argument, the network's own prefix length (/24) is used.
    self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0')
    self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240')
def testIpType(self):
    """The IPNetwork/IPAddress factories dispatch to version-specific types.

    Uses assertEqual; assertEquals is a deprecated unittest alias.
    """
    ipv4net = ipaddr.IPNetwork('1.2.3.4')
    ipv4addr = ipaddr.IPAddress('1.2.3.4')
    ipv6net = ipaddr.IPNetwork('::1.2.3.4')
    ipv6addr = ipaddr.IPAddress('::1.2.3.4')
    self.assertEqual(ipaddr.IPv4Network, type(ipv4net))
    self.assertEqual(ipaddr.IPv4Address, type(ipv4addr))
    self.assertEqual(ipaddr.IPv6Network, type(ipv6net))
    self.assertEqual(ipaddr.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
    """Boundary checks for the IPv4 reserved-range predicates.

    Each predicate is probed just inside and just outside its range, for
    both networks and bare addresses.  Uses assertEqual; assertEquals is
    a deprecated unittest alias.
    """
    # test networks
    self.assertEqual(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
    self.assertEqual(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('192.169.0.0').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('10.255.255.255').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('11.0.0.0').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('172.31.255.255').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
    self.assertEqual(False,
                     ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
    self.assertEqual(True,
                     ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
    # test addresses
    self.assertEqual(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
    self.assertEqual(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('192.168.1.1').is_private)
    self.assertEqual(False, ipaddr.IPAddress('192.169.0.0').is_private)
    self.assertEqual(True, ipaddr.IPAddress('10.255.255.255').is_private)
    self.assertEqual(False, ipaddr.IPAddress('11.0.0.0').is_private)
    self.assertEqual(True, ipaddr.IPAddress('172.31.255.255').is_private)
    self.assertEqual(False, ipaddr.IPAddress('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddr.IPAddress('169.254.100.200').is_link_local)
    self.assertEqual(False,
                     ipaddr.IPAddress('169.255.100.200').is_link_local)
    self.assertEqual(True,
                     ipaddr.IPAddress('127.100.200.254').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
    self.assertEqual(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
    """Boundary checks for the IPv6 reserved-range predicates.

    Each predicate is probed just inside and just outside its range, for
    both networks and bare addresses.  Uses assertEqual; assertEquals is
    a deprecated unittest alias.
    """
    self.assertEqual(True, ipaddr.IPNetwork('ffff::').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork(2**128-1).is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('ff00::').is_multicast)
    self.assertEqual(False, ipaddr.IPNetwork('fdff::').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('fecf::').is_site_local)
    self.assertEqual(True, ipaddr.IPNetwork(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPNetwork('ff00::').is_site_local)
    self.assertEqual(True, ipaddr.IPNetwork('fc00::').is_private)
    self.assertEqual(True, ipaddr.IPNetwork(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('fe00::').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('fea0::').is_link_local)
    self.assertEqual(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPNetwork('fec0::').is_link_local)
    self.assertEqual(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
    # A multi-address network is not "the" loopback/unspecified address.
    self.assertEqual(False, ipaddr.IPNetwork('::1/127').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('::').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('::2').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('0::0').is_unspecified)
    self.assertEqual(False, ipaddr.IPNetwork('::1').is_unspecified)
    self.assertEqual(False, ipaddr.IPNetwork('::/127').is_unspecified)
    # test addresses
    self.assertEqual(True, ipaddr.IPAddress('ffff::').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress(2**128-1).is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('ff00::').is_multicast)
    self.assertEqual(False, ipaddr.IPAddress('fdff::').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('fecf::').is_site_local)
    self.assertEqual(True, ipaddr.IPAddress(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPAddress('ff00::').is_site_local)
    self.assertEqual(True, ipaddr.IPAddress('fc00::').is_private)
    self.assertEqual(True, ipaddr.IPAddress(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPAddress('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPAddress('fe00::').is_private)
    self.assertEqual(True, ipaddr.IPAddress('fea0::').is_link_local)
    self.assertEqual(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPAddress('fec0::').is_link_local)
    self.assertEqual(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('::1').is_loopback)
    self.assertEqual(False, ipaddr.IPAddress('::2').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('0::0').is_unspecified)
    self.assertEqual(False, ipaddr.IPAddress('::1').is_unspecified)
    # some generic IETF reserved addresses
    self.assertEqual(True, ipaddr.IPAddress('100::').is_reserved)
    self.assertEqual(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
def testIpv4Mapped(self):
    """ipv4_mapped extracts the IPv4 address from ::ffff:a.b.c.d forms."""
    expected = ipaddr.IPAddress('192.168.1.1')
    # Dotted-quad and hexadecimal spellings of the same mapped address.
    self.assertEqual(expected,
                     ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped)
    self.assertEqual(expected,
                     ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped)
    # Without the ::ffff: prefix the address is not IPv4-mapped.
    self.assertEqual(None, ipaddr.IPAddress('::c0a8:101').ipv4_mapped)
def testAddrExclude(self):
    """address_exclude returns the remainder of a network minus a subnet."""
    outer = ipaddr.IPNetwork('10.1.1.0/24')
    inner = ipaddr.IPNetwork('10.1.1.0/26')
    unrelated = ipaddr.IPNetwork('10.2.1.0/24')
    plain_addr = ipaddr.IPAddress('10.1.1.0')
    remainder = [ipaddr.IPNetwork('10.1.1.64/26'),
                 ipaddr.IPNetwork('10.1.1.128/25')]
    self.assertEqual(remainder, outer.address_exclude(inner))
    # A network outside the original cannot be excluded.
    self.assertRaises(ValueError, outer.address_exclude, unrelated)
    # Only networks, not bare addresses, can be excluded.
    self.assertRaises(TypeError, outer.address_exclude, plain_addr)
    # Excluding the whole network leaves nothing.
    self.assertEqual([], outer.address_exclude(outer))
def testHash(self):
    """Equal networks/addresses hash equally and work as dict keys.

    Uses assertEqual; assertEquals is a deprecated unittest alias.
    """
    self.assertEqual(hash(ipaddr.IPNetwork('10.1.1.0/24')),
                     hash(ipaddr.IPNetwork('10.1.1.0/24')))
    self.assertEqual(hash(ipaddr.IPAddress('10.1.1.0')),
                     hash(ipaddr.IPAddress('10.1.1.0')))
    ip1 = ipaddr.IPAddress('10.1.1.0')
    ip2 = ipaddr.IPAddress('1::')
    # All four objects must be usable as dictionary keys.
    dummy = {}
    dummy[self.ipv4] = None
    dummy[self.ipv6] = None
    dummy[ip1] = None
    dummy[ip2] = None
    self.assertTrue(self.ipv4 in dummy)
    self.assertTrue(ip2 in dummy)
def testCopyConstructor(self):
    """Constructors accept an existing instance and produce an equal copy."""
    for source in (ipaddr.IPNetwork('10.1.1.0/24'),
                   ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')):
        self.assertEqual(source, ipaddr.IPNetwork(source))
    v4 = ipaddr.IPv4Address('1.1.1.1')
    v6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
    self.assertEqual(v4, ipaddr.IPv4Address(v4))
    self.assertEqual(v6, ipaddr.IPv6Address(v6))
def testCompressIPv6Address(self):
    """str() emits the canonical compressed form of an IPv6 network.

    Maps uncompressed (or partially compressed) input to the expected
    compressed output.  Uses assertEqual; assertEquals is a deprecated
    unittest alias.
    """
    test_addresses = {
        '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
        '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
        '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
        '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
        '2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
        '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
        '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
        '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
        '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
        '0:0:0:0:0:0:0:0': '::/128',
        '0:0:0:0:0:0:0:0/0': '::/0',
        '0:0:0:0:0:0:0:1': '::1/128',
        '2001:0658:022a:cafe:0000:0000:0000:0000/66':
        '2001:658:22a:cafe::/66',
    }
    for uncompressed, compressed in test_addresses.items():
        self.assertEqual(compressed, str(ipaddr.IPv6Network(uncompressed)))
def testExplodeShortHandIpStr(self):
    """Shorthand IPv6 strings expand to the full eight-group form."""
    net = ipaddr.IPv6Network('2001::1')
    # Explicit expansion of the network's own address string.
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001',
                     net._explode_shorthand_ip_string(str(net.ip)))
    # The exploded property performs the same expansion.
    self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001',
                     ipaddr.IPv6Network('::1/128').exploded)
def testIntRepresentation(self):
    """int() yields the numeric value of the address."""
    # 0x01020304 == 16909060, i.e. 1.2.3.4.
    self.assertEqual(0x01020304, int(self.ipv4))
    self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))
def testHexRepresentation(self):
    """hex() yields the hexadecimal string of the address value."""
    self.assertEqual(hex(0x1020304), hex(self.ipv4))
    self.assertEqual(hex(0x20010658022ACAFE0200000000000001), hex(self.ipv6))
# backwards compatibility
def testBackwardsCompability(self):
    """The legacy CamelCase API entry points still behave correctly."""
    collapsed = ipaddr.CollapseAddrList(
        [ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')])
    self.assertEqual([ipaddr.IPNetwork('1.1.0.0/23')], collapsed)
    excluded = ipaddr.IPNetwork('::42:0/112').AddressExclude(
        ipaddr.IPNetwork('::42:8000/113'))
    self.assertEqual([ipaddr.IPNetwork('::42:0/113')], excluded)
    self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
        ipaddr.IPNetwork('2::/9')) < 0)
    self.assertEqual(False, ipaddr.IPNetwork('1::/16').Contains(
        ipaddr.IPNetwork('2::/16')))
    self.assertEqual([ipaddr.IPNetwork('0.0.0.0/1'),
                      ipaddr.IPNetwork('128.0.0.0/1')],
                     ipaddr.IPNetwork('0.0.0.0/0').Subnet())
    self.assertEqual([ipaddr.IPNetwork('::/128'),
                      ipaddr.IPNetwork('::1/128')],
                     ipaddr.IPNetwork('::/127').Subnet())
    self.assertEqual(ipaddr.IPNetwork('1.0.0.0/31'),
                     ipaddr.IPNetwork('1.0.0.0/32').Supernet())
    self.assertEqual(ipaddr.IPNetwork('::/120'),
                     ipaddr.IPNetwork('::/121').Supernet())
    self.assertEqual(True, ipaddr.IPNetwork('10.0.0.02').IsRFC1918())
    self.assertEqual(False, ipaddr.IPNetwork('10.0.0.0').IsMulticast())
    self.assertEqual(True, ipaddr.IPNetwork('127.255.255.255').IsLoopback())
    self.assertEqual(False,
                     ipaddr.IPNetwork('169.255.255.255').IsLinkLocal())
def testForceVersion(self):
    """The version keyword forces IPv6 interpretation of an integer."""
    # A bare integer defaults to IPv4.
    self.assertEqual(4, ipaddr.IPNetwork(1).version)
    self.assertEqual(6, ipaddr.IPNetwork(1, version=6).version)
def testWithStar(self):
    """with_prefixlen/with_netmask/with_hostmask string representations."""
    self.assertEqual("1.2.3.4/24", str(self.ipv4.with_prefixlen))
    self.assertEqual("1.2.3.4/255.255.255.0", str(self.ipv4.with_netmask))
    self.assertEqual("1.2.3.4/0.0.0.255", str(self.ipv4.with_hostmask))
    self.assertEqual('2001:658:22a:cafe:200::1/64',
                     str(self.ipv6.with_prefixlen))
    # rfc3513 sec 2.3 says that ipv6 only uses cidr notation for
    # subnets, so with_netmask matches with_prefixlen here.
    self.assertEqual('2001:658:22a:cafe:200::1/64',
                     str(self.ipv6.with_netmask))
    # A hostmask makes little sense for IPv6 but is kept for IPv4 parity.
    self.assertEqual('2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff',
                     str(self.ipv6.with_hostmask))
def testNetworkElementCaching(self):
    """network/broadcast/hostmask are computed lazily and then cached.

    Membership is tested with ``key in dict`` instead of the
    Python-2-only ``dict.has_key``, which Python 3 removed.
    """
    # V4 - make sure we're empty
    self.assertFalse('network' in self.ipv4._cache)
    self.assertFalse('broadcast' in self.ipv4._cache)
    self.assertFalse('hostmask' in self.ipv4._cache)
    # V4 - accessing the properties populates the cache.
    self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
    self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
    self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
    # V4 - check we're cached
    self.assertTrue('network' in self.ipv4._cache)
    self.assertTrue('broadcast' in self.ipv4._cache)
    self.assertTrue('hostmask' in self.ipv4._cache)
    # V6 - make sure we're empty
    self.assertFalse('network' in self.ipv6._cache)
    self.assertFalse('broadcast' in self.ipv6._cache)
    self.assertFalse('hostmask' in self.ipv6._cache)
    # V6 - populate and test
    self.assertEqual(self.ipv6.network,
                     ipaddr.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
        '2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6.hostmask,
                     ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
    # V6 - check we're cached
    self.assertTrue('network' in self.ipv6._cache)
    self.assertTrue('broadcast' in self.ipv6._cache)
    self.assertTrue('hostmask' in self.ipv6._cache)
def testIsValidIp(self):
    """_is_valid_ip accepts well-formed IPv6 strings, rejects malformed."""
    checker = ipaddr.IPv6Address('::')
    valid = ('2001:658:22a:cafe:200::1',
             '::ffff:10.10.0.0',
             '::ffff:192.168.0.0')
    invalid = ('2001:658:22a::::1',               # too many colons
               ':658:22a:cafe:200::1',            # leading single colon
               '2001:658:22a:cafe:200:',          # trailing single colon
               '2001:658:22a:cafe:200:127.0.0.1::1',  # v4 part not at end
               '2001:658:22a:cafe:200::127.0.1',  # truncated v4 part
               '2001:658:22a:zzzz:200::1',        # non-hex group
               '2001:658:22a:cafe1:200::1')       # group too long
    for candidate in valid:
        self.assertTrue(checker._is_valid_ip(candidate))
    for candidate in invalid:
        self.assertFalse(checker._is_valid_ip(candidate))
# Run the full test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
nouiz/fredericbastien-ipaddr-py-speed-up
|
tags/2.1.4/ipaddr_test.py
|
Python
|
apache-2.0
| 46,220
|
[
"FEFF"
] |
83f67b0b1cd7abb7dd680733d4dd78cfa0d2eb0896aec30dd0d1433e855e5e2d
|
import numpy as np
from .._common import in_search_space
from .._helpers import SampleResult, register
__all__ = [
"sample",
]
def sample(
    fun,
    bounds,
    x0=None,
    args=(),
    maxiter=100,
    stepsize=0.1,
    perc=1.0,
    seed=None,
    constraints=None,
    return_all=True,
    callback=None,
):
    """
    Sample the variable space using the Metropolis-Hastings algorithm.

    Parameters
    ----------
    fun : callable
        The objective function to be sampled. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and args is a tuple of any additional fixed parameters needed to completely specify the function.
    bounds : array_like
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining the finite lower and upper bounds for the sampling argument of ``fun``. It is required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used to determine the number of parameters in ``x``.
    x0 : array_like or None, optional, default None
        Initial sample. Array of real elements of size (``ndim``,), where ``ndim`` is the number of independent variables.
    args : tuple, optional, default None
        Extra arguments passed to the objective function.
    maxiter : int, optional, default 100
        Total number of samples to generate.
    stepsize : scalar or array_like, optional, default 0.1
        Standard deviation of Gaussian perturbation (as a fraction of feasible space defined by ``bounds``).
    perc : scalar, optional, default 1.0
        Number of dimensions to perturb at each iteration (as a fraction of total number of variables).
    seed : int or None, optional, default None
        Seed for random number generator.
    constraints : str or None, optional, default None
        Constraints definition:

         - None: no constraint
         - 'Reject': infeasible solutions are always rejected

    return_all : bool, optional, default True
        Set to True to return an array with shape (``maxiter``, ``ndim``) of all the samples.
    callback : callable or None, optional, default None
        Called after each iteration. It is a callable with the signature ``callback(xk, SampleResult state)``, where ``xk`` is the current population and ``state`` is a :class:`stochopy.sample.SampleResult` object with the same fields as the ones from the return.

    Returns
    -------
    :class:`stochopy.sample.SampleResult`
        The sampling result represented as a :class:`stochopy.sample.SampleResult`. Important attributes are:

         - ``x``: the best sample array
         - ``fun``: the best sample function value
         - ``xall``: the samples array
         - ``funall``: the samples' function value array

    """
    # Cost function
    if not callable(fun):
        raise TypeError()

    # Dimensionality and search space
    if np.ndim(bounds) != 2:
        raise ValueError()

    ndim = len(bounds)
    lower, upper = np.transpose(bounds)

    # Initial guess x0
    if x0 is not None and len(x0) != ndim:
        raise ValueError()

    # Step size: a scalar is broadcast to all dimensions.  Work on a float
    # copy so the caller's array is never mutated (the original in-place
    # ``*=`` clobbered user input and failed outright on plain lists), then
    # convert from a fraction of the feasible range to absolute units.
    if np.ndim(stepsize) == 0:
        stepsize = np.full(ndim, stepsize)
    else:
        stepsize = np.asarray(stepsize, dtype=float)
    if len(stepsize) != ndim:
        raise ValueError()
    stepsize = stepsize * 0.5 * (upper - lower)

    # Number of dimensions to perturb per iteration
    if not 0.0 <= perc <= 1.0:
        raise ValueError()
    ndim_per_iter = max(1, int(perc * ndim))

    # Seed
    if seed is not None:
        np.random.seed(seed)

    # Callback
    if callback is not None and not callable(callback):
        raise ValueError()

    # Initialize arrays
    xall = np.empty((maxiter, ndim))
    funall = np.empty(maxiter)
    xall[0] = x0 if x0 is not None else np.random.uniform(lower, upper)
    funall[0] = fun(xall[0], *args)

    # First iteration for callback
    if callback is not None:
        res = SampleResult(x=xall[0], fun=funall[0], nit=1, accept_ratio=1.0)
        if return_all:
            res.update({"xall": xall[:1], "funall": funall[:1]})

        callback(xall[0], res)

    # Metropolis-Hastings algorithm.  The acceptance rule below implements
    # sampling of exp(-fun), i.e. ``fun`` is treated as a negative
    # log-density (lower is better).
    i = 1
    n_accepted = 0
    # Track the best sample seen so far.  Start from the initial sample's
    # value: with the original ``np.Inf`` seed, x0 was never counted and
    # ``fun=inf`` was returned when no proposal was ever accepted.
    imin, fmin = 0, funall[0]
    while i < maxiter:
        # Perturb the dimensions in groups of ndim_per_iter.
        for j in np.arange(0, ndim, ndim_per_iter):
            # Last index of the group.  Clamp to ndim - 1: the original
            # ``min(ndim, ...)`` made the perturbation vector one element
            # longer than the assigned slice whenever ndim was not a
            # multiple of ndim_per_iter, raising a broadcast error.
            jmax = min(ndim - 1, j + ndim_per_iter - 1)
            perturbation = np.random.randn(jmax - j + 1) * stepsize[j : jmax + 1]

            xall[i] = xall[i - 1].copy()
            xall[i, j : jmax + 1] += perturbation

            accept = False
            if in_search_space(xall[i], lower, upper, constraints):
                funall[i] = fun(xall[i], *args)
                # log of the Metropolis acceptance ratio, capped at 0.
                log_alpha = min(0.0, funall[i - 1] - funall[i])
                accept = log_alpha > np.log(np.random.rand())

            if accept:
                n_accepted += 1

                if funall[i] < fmin:
                    imin, fmin = i, funall[i]

            else:
                # Rejected: repeat the previous sample.
                xall[i] = xall[i - 1]
                funall[i] = funall[i - 1]

            i += 1

            if callback is not None:
                res = SampleResult(
                    x=xall[imin], fun=funall[imin], nit=i, accept_ratio=n_accepted / i,
                )
                if return_all:
                    # Include every sample generated so far (the original
                    # ``[: i - 1]`` slice dropped the newest one).
                    res.update({"xall": xall[:i], "funall": funall[:i]})

                callback(xall[i - 1], res)

            if i == maxiter:
                break

    res = SampleResult(
        x=xall[imin], fun=fmin, nit=maxiter, accept_ratio=n_accepted / maxiter,
    )
    if return_all:
        res.update({"xall": xall, "funall": funall})

    return res


register("mcmc", sample)
|
keurfonluu/StochOPy
|
stochopy/sample/mcmc/_mcmc.py
|
Python
|
mit
| 5,627
|
[
"Gaussian"
] |
f22810b275734e8de972a7a2665eb254638aac36cf967a8a4e897189ef1433bc
|
import sys
from time import time
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import itertools
import matplotlib as mpl
from scipy import linalg
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn import mixture
# Fixed seed so clustering runs are reproducible.
np.random.seed(42)

### Get command line arguments
clusterType = sys.argv[1] # Clustering algorithm ('kmeans', 'kmeans-rand', 'kmeans-pca', 'gmm', 'gmm-pca')
fileID = sys.argv[2]; # File identifier used in the input/output file names
set = sys.argv[3]; # Dataset set number (NOTE(review): shadows the builtin 'set')
numSpeakers = sys.argv[4]; # Number of speakers
blockLength = sys.argv[5]; # Block length
hopLength = sys.argv[6]; # Hop length
thresholdOrder = sys.argv[7] # Adaptive threshold order
extraid = int(sys.argv[8]); # Extra run identifier; 0 means "none"
gmm_co_var_type = sys.argv[9]; # GMM covariance type: 'full' or 'tied'

### Prepare output file path (hard-coded local dataset root)
outputRoot = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/"+"set"+set+"_S"+numSpeakers+"_"+hopLength+"_"+blockLength+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
    outputRoot = outputRoot + "_" + str(extraid)
outputRoot = outputRoot + "_" + clusterType + ".csv"
# print outputRoot
# Output CSV: one "true_label,estimated_label" pair per frame (written by cluster()).
txtResultFile = open(outputRoot, "w")

### Prepare input file path (feature CSV produced by the extraction step)
path = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/features/set"+set+"_"+hopLength+"_"+blockLength+"_S"+numSpeakers+"_"+fileID+"_"+thresholdOrder
if extraid != 0:
    path = path + "_" + str(extraid)
path = path + ".csv"
#print path
f = open(path)
# Skip the header line before handing the file to loadtxt.
f.readline()

### Read data: column 0 is the ground-truth label, the rest are features.
data = np.loadtxt(fname = f, delimiter=',')
all_labels = data[:,0]
# Label 0 marks frames excluded from clustering; keep only labelled frames.
labels = all_labels[all_labels != 0]
#labels = data[:,0]
#print labels
#normalize data
#features = scale(data[:,1:])
features = data[data[:,0] != 0]
features = scale(features[:,1:])
# NOTE(review): taken AFTER scaling, so despite the name these columns are
# standardized; also drops the first feature column — confirm intent.
unscaled_features = features[:,1:]
#features = data[:,1:]
#print features
n_samples, n_features = features.shape
n_speakers = len(np.unique(labels))
speaker_ids = np.unique(labels)
print speaker_ids
print ("n_speakers %d \nn_samples %d \nn_features %d" % (n_speakers,n_samples,n_features))
sample_size = 300
print(79 * '_')
### Method
def visualize_gmm(data,gmm):
    """Plot PCA-reduced samples colored by true speaker, with GMM ellipses.

    NOTE(review): the GMM is re-fit on the 2-D PCA projection here, so the
    plotted components are not those of the model used for clustering.
    Uses module globals: speaker_ids, labels, plt.
    """
    ## Visualize data
    reduced_data = PCA(n_components=2).fit_transform(data)
    gmm.fit(reduced_data)
    color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm','k'])
    # Scatter each true speaker's frames in its own color.
    for speaker in speaker_ids:
        speaker_labels = np.argwhere(labels==speaker)
        plt.scatter(reduced_data[speaker_labels,0],
                    reduced_data[speaker_labels,1],
                    color=next(color_iter))
    for i, (clf, title) in enumerate([(gmm, 'GMM')]):
        splot = plt.subplot(1, 1, 1 + i)
        Y_ = clf.predict(reduced_data)
        # NOTE(review): _get_covars() belongs to the legacy sklearn
        # mixture.GMM API — confirm the installed sklearn still has it.
        for i, (mean, covar, color) in enumerate(zip(clf.means_, clf._get_covars(), color_iter)):
            # Eigen-decompose the covariance to orient the ellipse.
            v, w = linalg.eigh(covar)
            u = w[0] / linalg.norm(w[0])
            # as the DP will not use every component it has access to
            # unless it needs it, we shouldn't plot the redundant
            # components.
            # if not np.any(Y_ == i):
            #     continue
            # plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
            # print X[Y_ == i, 0]
            # Plot an ellipse to show the Gaussian component
            angle = np.arctan(u[1] / u[0])
            angle = 180 * angle / np.pi  # convert to degrees
            ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
            ell.set_clip_box(splot.bbox)
            ell.set_alpha(0.5)
            splot.add_artist(ell)
        plt.xlim(-10, 10)
        plt.ylim(-6, 6)
        plt.xticks(())
        plt.yticks(())
        plt.title(title)
    plt.show()
### Method
def visualize_kmeans(data):
    """Plot k-means decision regions over PCA-reduced data.

    NOTE(review): k-means is re-fit on the 2-D PCA projection, so the
    plotted boundaries are not those of the clustering run in cluster().
    Uses module globals: n_speakers, speaker_ids, labels, plt.
    """
    ########################################################################
    # Visualize data
    reduced_data = PCA(n_components=2).fit_transform(data)
    kmeans = KMeans(init='k-means++',n_clusters=n_speakers,n_init=10)
    kmeans.fit(reduced_data)
    # step size of mesh
    h = .02
    # Plot the decision boundary over a grid covering the data extent.
    x_min, x_max = reduced_data[:,0].min() - 1, reduced_data[:,0].max() + 1
    y_min, y_max = reduced_data[:,1].min() - 1, reduced_data[:,1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Obtain labels for each point in mesh
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1)
    plt.clf()
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
    # Colour Cycler: one color per true speaker.
    colorcycler = itertools.cycle(['r', 'g', 'b', 'y','c','k','w','m'])
    for speaker in speaker_ids:
        speaker_labels = np.argwhere(labels==speaker)
        # for every_speaker in speaker_labels:
        #     j = j + 1
        #     txtResultFile.write("{0},{1}".format(np.int_(speaker),np.int_(every_speaker)))
        #     if i==len(speaker_ids):
        #         if j<len(speaker_labels):
        #             txtResultFile.write(",")
        #     else:
        #         txtResultFile.write(",")
        plt.scatter(reduced_data[speaker_labels,0],
                    reduced_data[speaker_labels,1],
                    color=next(colorcycler))
    #plt.plot(reduced_data[:,0], reduced_data[:,1], 'k.',markersize=2)
    #plt.plot(reduced_data[:,0],reduced_data[:,1],'g^', reduced_data[:,0])
    # plot the centroids as white X
    centroids = kmeans.cluster_centers_
    plt.scatter(centroids[:,0],centroids[:,1],
                marker='x', s=169, linewidths=3,
                color='w', zorder=10)
    plt.title('K-means clustering on the speakers (PCA-reduced data)')
    plt.xlim(x_min,x_max)
    plt.ylim(y_min,y_max)
    plt.xticks(())
    plt.yticks(())
    plt.show()
### Method
def cluster(estimator, name, data):
    """Fit an estimator, score it against the true labels, and dump results.

    Writes one "true_label,estimated_label" line per frame to the global
    txtResultFile; frames with true label 0 (excluded from clustering)
    get -1 as their estimated label.  Uses module globals: labels,
    all_labels, txtResultFile.
    """
    t0 = time()
    estimator.fit(data)
    estimated_labels = estimator.predict(data)
    # External cluster-quality metrics against the ground-truth labels.
    homogeneity_score = metrics.homogeneity_score(labels,estimated_labels)
    completeness_score = metrics.completeness_score(labels, estimated_labels)
    v_measure_score = metrics.v_measure_score(labels, estimated_labels)
    adjusted_rand_score = metrics.adjusted_rand_score(labels, estimated_labels)
    adjusted_mutual_info_score = metrics.adjusted_mutual_info_score(labels, estimated_labels)
    # silhouette_score = metrics.silhouette_score(features, estimated_labels,
    #                                             metric='euclidean',
    #                                             sample_size=sample_size)
    # i walks all frames; j walks only the clustered (non-zero-label) frames,
    # since estimated_labels has no entries for excluded frames.
    i=0
    j=0
    for label in all_labels:
        i = i + 1;
        txtResultFile.write("{0}".format(label))
        txtResultFile.write(",")
        if label == 0:
            # Excluded frame: no estimate exists, emit sentinel -1.
            txtResultFile.write("{0}".format(-1))
        else:
            txtResultFile.write("{0}".format(estimated_labels[j]))
            j = j + 1
        if i<len(all_labels):
            txtResultFile.write("\n")
    print('Name: % 9s \n'
          'Time: %.2fs \n'
          'Homogeneity Score: %.3f \n'
          'Completeness Score: %.3f \n'
          'V Measure score: %.3f \n'
          'Adjusted rand score: %.3f \n'
          'Adjusted Mutual Info score: %.3f \n'
          % (name, (time()-t0),
             homogeneity_score,
             completeness_score,
             v_measure_score,
             adjusted_rand_score,
             adjusted_mutual_info_score))
    print(79 * '_')
# Dispatch on the requested clustering algorithm (first CLI argument).

# KMeans
if (clusterType == "kmeans"):
    cluster(KMeans(init='k-means++', n_clusters=n_speakers, n_init=10),
            name='k-means++',
            data=features)
    visualize_kmeans(features)

## KMeans with random initialization
if (clusterType == "kmeans-rand"):
    cluster(KMeans(init='random', n_clusters=n_speakers, n_init=10),
            name='Random',
            data=features)
    visualize_kmeans(features)
#
## KMeans PCA
# in this case the seeding of the centers is deterministic, hence we run the algorithm only once
if (clusterType == "kmeans-pca"):
    pca = PCA(n_components=n_speakers).fit(features)
    cluster(KMeans(init=pca.components_, n_clusters=n_speakers, n_init=1),
            name='PCA-based',
            data=features)
    visualize_kmeans(features)

## GMM
# Fit a mixture of Gaussians with EM.
# NOTE(review): mixture.GMM is the legacy sklearn API (replaced by
# GaussianMixture), and n_components=n_speakers-1 uses one fewer
# component than there are speakers — confirm both are intentional.
if (clusterType == "gmm"):
    gmm = mixture.GMM(n_components=n_speakers-1, covariance_type=gmm_co_var_type)
    cluster(gmm,
            name='gmm',
            data=features)
    visualize_gmm(features,gmm)

## GMM-PCA
# Fit a mixture of Gaussians with EM on a 10-D PCA projection.
if (clusterType == "gmm-pca"):
    reduced_data = PCA(n_components=10).fit_transform(unscaled_features)
    reduced_data = scale(reduced_data)
    gmm = mixture.GMM(n_components=n_speakers, covariance_type=gmm_co_var_type)
    cluster(gmm,
            name='gmm-pca',
            data=reduced_data)
    visualize_gmm(reduced_data,gmm)

### Close output file
txtResultFile.close()
|
avroshk/7100_Spring_16
|
Dataset/create/cluster.py
|
Python
|
mit
| 9,309
|
[
"Gaussian"
] |
ae2484527cf78f0a3854d0f3290da8181e36e3dd2fa4e7e873300d013c4cecb3
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Opencv(CMakePackage):
"""OpenCV is released under a BSD license and hence it's free for both
academic and commercial use. It has C++, C, Python and Java interfaces and
supports Windows, Linux, Mac OS, iOS and Android. OpenCV was designed for
computational efficiency and with a strong focus on real-time applications.
Written in optimized C/C++, the library can take advantage of multi-core
processing. Enabled with OpenCL, it can take advantage of the hardware
acceleration of the underlying heterogeneous compute platform. Adopted all
around the world, OpenCV has more than 47 thousand people of user community
and estimated number of downloads exceeding 9 million. Usage ranges from
interactive art, to mines inspection, stitching maps on the web or through
advanced robotics.
"""
homepage = 'http://opencv.org/'
url = 'https://github.com/Itseez/opencv/archive/3.1.0.tar.gz'
version('master', git="https://github.com/opencv/opencv.git", branch="master")
version('3.4.1', 'a0b7a47899e67b3490ea31edc4f6e8e6')
version('3.4.0', '170732dc760e5f7ddeccbe53ba5d16a6')
version('3.3.1', 'b1ed9aea030bb5bd9df28524d97de84c')
version('3.3.0', '98a4e4c6f23ec725e808a891dc11eec4')
version('3.2.0', '1ea44a1d98c126ad40079d8eb914a72e')
version('3.1.0', 'a0669e22172dfc3225835b180744c9f0')
version('2.4.13.2', 'fe52791ce523681a67036def4c25261b')
version('2.4.13.1', 'f6d354500d5013e60dc0fc44b07a63d1')
version('2.4.13', '8feb45a71adad89b8017a777477c3eff')
version('2.4.12.3', '2496a4a4caf8fecfbfc294fbe6a814b0')
version('2.4.12.2', 'bc0c60c2ea1cf4078deef99569912fc7')
version('2.4.12.1', '7192f51434710904b5e3594872b897c3')
# Standard variants
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('lapack', default=True, description='Include Lapack library support')
variant('powerpc', default=False, description='Enable PowerPC for GCC')
variant('vsx', default=False, description='Enable POWER8 and above VSX (64-bit little-endian)')
variant('fast-math', default=False,
description='Enable -ffast-math (not recommended for GCC 4.6.x)')
# OpenCV modules
variant('calib3d', default=True, description='calib3d module')
variant('core', default=True, description='Include opencv_core module into the OpenCV build')
variant('dnn', default=True, description='Build DNN support')
variant('features2d', default=True, description='features2d module')
variant('flann', default=True, description='flann module')
variant('highgui', default=True, description='Include opencv_highgui module into the OpenCV build')
variant('imgproc', default=True, description='Include opencv_imgproc module into the OpenCV build')
variant('java', default=True,
description='Activates support for Java')
variant('ml', default=True, description='Build ML support')
variant('python', default=True,
description='Enables the build of Python extensions')
variant('stitching', default=True, description='stitching module')
variant('superres', default=True, description='superres module')
variant('ts', default=True, description='Include opencv_ts module into the OpenCV build')
variant('video', default=True, description='video module')
variant('videostab', default=True, description='videostab module')
variant('videoio', default=True, description='videoio module')
# Optional 3rd party components
variant('cuda', default=True, description='Activates support for CUDA')
variant('eigen', default=True, description='Activates support for eigen')
variant('ipp', default=True, description='Activates support for IPP')
variant('ipp_iw', default=True, description='Build IPP IW from source')
variant('jasper', default=True, description='Activates support for JasPer')
variant('jpeg', default=True, description='Include JPEG support')
variant('opencl', default=True, description='Include OpenCL Runtime support')
variant('opencl_svm', default=True, description='Include OpenCL Shared Virtual Memory support')
variant('openclamdfft', default=True, description='Include OpenCL AMD OpenCL FFT library support')
variant('openclamdblas', default=True, description='Include OpenCL AMD OpenCL BLAS library support')
variant('openmp', default=True, description='Activates support for OpenMP threads')
variant('pthreads_pf', default=True, description='Use pthreads-based parallel_for')
variant('png', default=True, description='Include PNG support')
variant('qt', default=False, description='Activates support for QT')
variant('gtk', default=True, description='Activates support for GTK')
variant('tiff', default=True, description='Include TIFF support')
variant('vtk', default=True, description='Activates support for VTK')
variant('zlib', default=True, description='Build zlib from source')
# Patch to fix conflict between CUDA and OpenCV (reproduced with 3.3.0
# and 3.4.1) header file that have the same name.Problem is fixed in
# the current development branch of OpenCV. See #8461 for more information.
patch('dnn_cuda.patch', when='@3.3.0:3.4.1+cuda+dnn')
depends_on('eigen~mpfr', when='+eigen', type='build')
depends_on('zlib', when='+zlib')
depends_on('libpng', when='+png')
depends_on('jpeg', when='+jpeg')
depends_on('libtiff', when='+tiff')
depends_on('jasper', when='+jasper')
depends_on('cuda', when='+cuda')
depends_on('gtkplus', when='+gtk')
depends_on('vtk', when='+vtk')
depends_on('qt', when='+qt')
depends_on('java', when='+java')
depends_on('py-numpy', when='+python', type=('build', 'run'))
depends_on('protobuf@3.1.0', when='@3.3.0: +dnn')
depends_on('ffmpeg', when='+videoio')
depends_on('mpi', when='+videoio')
extends('python', when='+python')
    def cmake_args(self):
        """Translate this package's variants into OpenCV CMake arguments.

        Returns the list of ``-D...`` definitions handed to CMake: one
        ON/OFF switch per variant, plus explicit library/include paths for
        the media-I/O dependencies so CMake uses the Spack-provided copies
        instead of whatever it finds on the system.
        """
        spec = self.spec

        # Standard variants
        args = [
            '-DBUILD_SHARED_LIBS:BOOL={0}'.format((
                'ON' if '+shared' in spec else 'OFF')),
            '-DENABLE_PRECOMPILED_HEADERS:BOOL=OFF',
            '-DWITH_LAPACK={0}'.format((
                'ON' if '+lapack' in spec else 'OFF')),
            '-DENABLE_POWERPC={0}'.format((
                'ON' if '+powerpc' in spec else 'OFF')),
            '-DENABLE_VSX={0}'.format((
                'ON' if '+vsx' in spec else 'OFF')),
            '-DENABLE_FAST_MATH={0}'.format((
                'ON' if '+fast-math' in spec else 'OFF')),
        ]

        # modules: one BUILD_opencv_* switch per OpenCV module variant
        args.extend([
            '-DBUILD_opencv_calib3d={0}'.format((
                'ON' if '+calib3d' in spec else 'OFF')),
            '-DBUILD_opencv_core:BOOL={0}'.format((
                'ON' if '+core' in spec else 'OFF')),
            '-DBUILD_opencv_dnn:BOOL={0}'.format((
                'ON' if '+dnn' in spec else 'OFF')),
            '-DBUILD_opencv_features2d={0}'.format((
                'ON' if '+features2d' in spec else 'OFF')),
            '-DBUILD_opencv_flann={0}'.format((
                'ON' if '+flann' in spec else 'OFF')),
            '-DBUILD_opencv_highgui:BOOL={0}'.format((
                'ON' if '+highgui' in spec else 'OFF')),
            '-DBUILD_opencv_imgproc:BOOL={0}'.format((
                'ON' if '+imgproc' in spec else 'OFF')),
            '-DBUILD_opencv_java:BOOL={0}'.format((
                'ON' if '+java' in spec else 'OFF')),
            '-DBUILD_opencv_ml={0}'.format((
                'ON' if '+ml' in spec else 'OFF')),
            '-DBUILD_opencv_stitching={0}'.format((
                'ON' if '+stitching' in spec else 'OFF')),
            '-DBUILD_opencv_superres={0}'.format((
                'ON' if '+superres' in spec else 'OFF')),
            '-DBUILD_opencv_ts={0}'.format((
                'ON' if '+ts' in spec else 'OFF')),
            '-DBUILD_opencv_video={0}'.format((
                'ON' if '+video' in spec else 'OFF')),
            '-DBUILD_opencv_videostab={0}'.format((
                'ON' if '+videostab' in spec else 'OFF')),
            '-DBUILD_opencv_videoio={0}'.format((
                'ON' if '+videoio' in spec else 'OFF')),
        ])

        # 3rd party components: WITH_* feature toggles
        args.extend([
            '-DBUILD_IPP_IW:BOOL={0}'.format((
                'ON' if '+ipp_iw' in spec else 'OFF')),
            '-DWITH_CUDA:BOOL={0}'.format((
                'ON' if '+cuda' in spec else 'OFF')),
            '-DWITH_EIGEN:BOOL={0}'.format((
                'ON' if '+eigen' in spec else 'OFF')),
            '-DWITH_IPP:BOOL={0}'.format((
                'ON' if '+ipp' in spec else 'OFF')),
            '-DWITH_JASPER:BOOL={0}'.format((
                'ON' if '+jasper' in spec else 'OFF')),
            '-DWITH_JPEG:BOOL={0}'.format((
                'ON' if '+jpeg' in spec else 'OFF')),
            '-DWITH_OPENCL:BOOL={0}'.format((
                'ON' if '+opencl' in spec else 'OFF')),
            '-DWITH_OPENCL_SVM:BOOL={0}'.format((
                'ON' if '+opencl_svm' in spec else 'OFF')),
            '-DWITH_OPENCLAMDFFT:BOOL={0}'.format((
                'ON' if '+openclamdfft' in spec else 'OFF')),
            '-DWITH_OPENCLAMDBLAS:BOOL={0}'.format((
                'ON' if '+openclamdblas' in spec else 'OFF')),
            '-DWITH_OPENMP:BOOL={0}'.format((
                'ON' if '+openmp' in spec else 'OFF')),
            '-DWITH_PTHREADS_PF:BOOL={0}'.format((
                'ON' if '+pthreads_pf' in spec else 'OFF')),
            '-DWITH_PNG:BOOL={0}'.format((
                'ON' if '+png' in spec else 'OFF')),
            '-DWITH_QT:BOOL={0}'.format((
                'ON' if '+qt' in spec else 'OFF')),
            '-DWITH_TIFF:BOOL={0}'.format((
                'ON' if '+tiff' in spec else 'OFF')),
            '-DWITH_VTK:BOOL={0}'.format((
                'ON' if '+vtk' in spec else 'OFF')),
        ])

        # Media I/O: point CMake at the exact Spack-built libraries/headers.
        # The *_LIBRARY_DEBUG vs *_LIBRARY_RELEASE choice follows the
        # CMAKE_BUILD_TYPE in the spec.
        if '+zlib' in spec:
            zlib = spec['zlib']
            args.extend([
                '-DZLIB_LIBRARY_{0}:FILEPATH={1}'.format((
                    'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
                    zlib.libs[0]),
                '-DZLIB_INCLUDE_DIR:PATH={0}'.format(
                    zlib.headers.directories[0])
            ])

        if '+png' in spec:
            libpng = spec['libpng']
            args.extend([
                '-DPNG_LIBRARY_{0}:FILEPATH={1}'.format((
                    'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
                    libpng.libs[0]),
                '-DPNG_INCLUDE_DIR:PATH={0}'.format(
                    libpng.headers.directories[0])
            ])

        if '+jpeg' in spec:
            libjpeg = spec['jpeg']
            args.extend([
                '-DBUILD_JPEG:BOOL=OFF',
                '-DJPEG_LIBRARY:FILEPATH={0}'.format(libjpeg.libs[0]),
                '-DJPEG_INCLUDE_DIR:PATH={0}'.format(
                    libjpeg.headers.directories[0])
            ])

        if '+tiff' in spec:
            libtiff = spec['libtiff']
            args.extend([
                '-DTIFF_LIBRARY_{0}:FILEPATH={1}'.format((
                    'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
                    libtiff.libs[0]),
                '-DTIFF_INCLUDE_DIR:PATH={0}'.format(
                    libtiff.headers.directories[0])
            ])

        if '+jasper' in spec:
            jasper = spec['jasper']
            args.extend([
                '-DJASPER_LIBRARY_{0}:FILEPATH={1}'.format((
                    'DEBUG' if 'build_type=Debug' in spec else 'RELEASE'),
                    jasper.libs[0]),
                '-DJASPER_INCLUDE_DIR:PATH={0}'.format(
                    jasper.headers.directories[0])
            ])

        # GUI: pick GTK3 or GTK2 flags depending on which gtkplus is in the
        # DAG; disable both when the variant is off.
        if '+gtk' not in spec:
            args.extend([
                '-DWITH_GTK:BOOL=OFF',
                '-DWITH_GTK_2_X:BOOL=OFF'
            ])
        elif '^gtkplus@3:' in spec:
            args.extend([
                '-DWITH_GTK:BOOL=ON',
                '-DWITH_GTK_2_X:BOOL=OFF'
            ])
        elif '^gtkplus@2:3' in spec:
            # GTK2 uses the dedicated WITH_GTK_2_X switch instead of WITH_GTK.
            args.extend([
                '-DWITH_GTK:BOOL=OFF',
                '-DWITH_GTK_2_X:BOOL=ON'
            ])

        # Python: enable exactly one of the python2/python3 bindings and
        # pin the interpreter/library/headers to the Spack python.
        if '+python' in spec:
            python_exe = spec['python'].command.path
            python_lib = spec['python'].libs[0]
            python_include_dir = spec['python'].headers.directories[0]

            if '^python@3:' in spec:
                args.extend([
                    '-DBUILD_opencv_python3=ON',
                    '-DPYTHON3_EXECUTABLE={0}'.format(python_exe),
                    '-DPYTHON3_LIBRARY={0}'.format(python_lib),
                    '-DPYTHON3_INCLUDE_DIR={0}'.format(python_include_dir),
                    '-DBUILD_opencv_python2=OFF',
                ])
            elif '^python@2:3' in spec:
                args.extend([
                    '-DBUILD_opencv_python2=ON',
                    '-DPYTHON2_EXECUTABLE={0}'.format(python_exe),
                    '-DPYTHON2_LIBRARY={0}'.format(python_lib),
                    '-DPYTHON2_INCLUDE_DIR={0}'.format(python_include_dir),
                    '-DBUILD_opencv_python3=OFF',
                ])
        else:
            args.extend([
                '-DBUILD_opencv_python2=OFF',
                '-DBUILD_opencv_python3=OFF'
            ])

        return args
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries(
"libopencv_*", root=self.prefix, shared=shared, recursive=True
)
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/opencv/package.py
|
Python
|
lgpl-2.1
| 14,977
|
[
"VTK"
] |
55907b4da11759748f701c3ddd9c3b1819a0ac07e31c630347766ff8856c892c
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import nova.conf
from nova.conf import paths
from nova import policies
import nova.policy
from nova.tests.unit import fake_policy
CONF = nova.conf.CONF
class RealPolicyFixture(fixtures.Fixture):
    """Load the live policy for tests.

    A base policy fixture that starts with the assumption that you'd
    like to load and enforce the shipped default policy in tests.

    Provides interfaces to tinker with both the contents and location
    of the policy file before loading to allow overrides. To do this
    implement ``_prepare_policy`` in the subclass, and adjust the
    ``policy_file`` accordingly.
    """

    def _prepare_policy(self):
        """Allow changing of the policy before we get started"""
        pass

    def setUp(self):
        """Point oslo.policy at (a possibly customized) policy file and
        re-initialize nova's enforcer, restoring pristine state on cleanup.
        """
        super(RealPolicyFixture, self).setUp()
        # policy_file can be overridden by subclasses
        self.policy_file = paths.state_path_def('etc/nova/policy.json')
        self._prepare_policy()
        CONF.set_override('policy_file', self.policy_file, group='oslo_policy')
        nova.policy.reset()
        nova.policy.init()
        self.addCleanup(nova.policy.reset)

    def set_rules(self, rules, overwrite=True):
        """Install *rules* (dict of rule name -> check string) on the
        enforcer; with ``overwrite=False`` they are merged instead of
        replacing the current rules.
        """
        # Reaches into nova.policy's private enforcer on purpose: tests need
        # to mutate the live enforcer, not a copy.
        policy = nova.policy._ENFORCER
        policy.set_rules(oslo_policy.Rules.from_dict(rules),
                         overwrite=overwrite)

    def add_missing_default_rules(self, rules):
        """Adds default rules and their values to the given rules dict.

        The given rules dict may have an incomplete set of policy rules.
        This method will add the default policy rules and their values to
        the dict. It will not override the existing rules.
        """
        for rule in policies.list_rules():
            if rule.name not in rules:
                rules[rule.name] = rule.check_str
class PolicyFixture(RealPolicyFixture):
    """Load a fake policy from nova.tests.unit.fake_policy

    This overrides the policy with a completely fake and synthetic
    policy file.

    NOTE(sdague): the use of this is deprecated, and we should unwind
    the tests so that they can function with the real policy. This is
    mostly legacy because our default test instances and default test
    contexts don't match up. It appears that in many cases fake_policy
    was just modified to whatever makes tests pass, which makes it
    dangerous to be used in tree. Long term a NullPolicy fixture might
    be better in those cases.
    """

    def _prepare_policy(self):
        # Write the synthetic policy into a per-test temporary directory so
        # the real policy file is never touched.
        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path,
                                        'policy.json')
        # load the fake_policy data and add the missing default rules.
        policy_rules = jsonutils.loads(fake_policy.policy_data)
        self.add_missing_default_rules(policy_rules)
        with open(self.policy_file, 'w') as f:
            jsonutils.dump(policy_rules, f)
        # Prevent any extra policy directories from being scanned on top of
        # the synthetic file.
        CONF.set_override('policy_dirs', [], group='oslo_policy')
class RoleBasedPolicyFixture(RealPolicyFixture):
    """Load a modified policy which allows all actions only by a single role.

    Useful for role-based permission tests: every known policy rule is
    stomped over and rewritten so that only ``role`` may perform it.
    """

    def __init__(self, role="admin", *args, **kwargs):
        super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
        self.role = role

    def _prepare_policy(self):
        # Every default rule becomes "role:<self.role>".
        check = 'role:%s' % self.role
        policy = dict((rule.name, check) for rule in policies.list_rules())
        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
        with open(self.policy_file, 'w') as f:
            jsonutils.dump(policy, f)
|
jianghuaw/nova
|
nova/tests/unit/policy_fixture.py
|
Python
|
apache-2.0
| 4,663
|
[
"TINKER"
] |
3e396c9a3d677e66a0f84516568f70eed09664ef1240bbc5894114baa5b96923
|
#!/usr/bin/env python
"""Run signal-to-reference alignments
"""
from __future__ import print_function
import sys
from signalAlignLib import *
from multiprocessing import Process, Queue, current_process, Manager
from serviceCourse.file_handlers import FolderHandler
from argparse import ArgumentParser
from random import shuffle
def parse_args():
    """Build and parse the command line for runSignalAlign.

    Returns the argparse.Namespace; required arguments are the fast5
    directory (--file_directory), the FASTA reference (--ref) and the
    output directory (--output_location).
    """
    parser = ArgumentParser(description=__doc__)

    # required arguments
    parser.add_argument('--file_directory', '-d', action='store',
                        dest='files_dir', required=True, type=str, default=None,
                        help="directory with MinION fast5 reads to align")
    parser.add_argument('--ref', '-r', action='store',
                        dest='ref', required=True, type=str,
                        help="reference sequence to align to, in FASTA")
    parser.add_argument('--output_location', '-o', action='store', dest='out',
                        required=True, type=str, default=None,
                        help="directory to put the alignments")

    # optional arguments
    parser.add_argument("--2d", action='store_true', dest="twoD", default=False)
    parser.add_argument("--bwt", action='store', dest="bwt", default=None)
    parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
                        required=False, type=str, default=None,
                        help="input HMM for template events, if you don't want the default")
    parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
                        required=False, type=str, default=None,
                        help="input HMM for complement events, if you don't want the default")
    parser.add_argument('--template_hdp', '-tH', action='store', dest='templateHDP', default=None,
                        help="template serialized HDP file")
    parser.add_argument('--complement_hdp', '-cH', action='store', dest='complementHDP', default=None,
                        help="complement serialized HDP file")
    parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
                        help="Specify degenerate nucleotide options: "
                             "variant -> {ACGT}, cytosine2 -> {CE} cytosine3 -> {CEO} adenosine -> {AI}")
    parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
                        default="threeState", help="decide which model to use, threeState by default")
    parser.add_argument('--file_of_files', '-fofn', action='store', dest='fofn', required=False, type=str, default=None,
                        help="text file containing absolute paths to files to use")
    parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
                        default=None, help="posterior match probability threshold, Default: 0.01")
    parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
                        required=False, default=None, help="number of diagonals to expand around each anchor")
    parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
                        required=False, default=None, help='amount to remove from an anchor constraint')
    parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
                        required=False, default=None, help="tab separated table with regions to align to")
    parser.add_argument("--motif", action="store", dest="motif_key", default=None)
    #parser.add_argument('--ambiguity_positions', '-p', action='store', required=False, default=None,
    #                    dest='substitution_file', help="Ambiguity positions")
    parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
                        default=4, type=int, help="number of jobs to run in parallel")
    parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
                        default=500, type=int, help="maximum number of reads to align")
    parser.add_argument('--ambig_char', '-X', action='store', required=False, default="X", type=str, dest='ambig_char',
                        help="Character to substitute at positions, default is 'X'.")
    parser.add_argument('--output_format', '-f', action='store', default="full", dest='outFmt',
                        help="output format: full, variantCaller, or assignments. Default: full")
    parser.add_argument('--debug', action='store_true', dest="DEBUG", default=False)

    args = parser.parse_args()
    return args
def make_degenerate_reference_iterator(input_sequence, block_size=1, step=6):
    """
    Yield (template, complement) sequence pairs with 'X' substituted at a
    sliding set of positions.

    input_sequence: string, input nucleotide sequence
    block_size: not implemented
    step: number of bases between degenerate characters
    :return (subbed sequence, complement subbed sequence)
    """
    complement_sequence = reverse_complement(dna=input_sequence, reverse=False,
                                             complement=True)
    seq_len = len(input_sequence)
    # One yield per frame offset: offset 0 subs positions 0, step, 2*step...,
    # offset 1 subs positions 1, step+1, ... and so on.
    for offset in range(step):
        template = list(input_sequence)
        complement = list(complement_sequence)
        for idx in range(offset, seq_len, step):
            template[idx] = "X"
            complement[idx] = "X"
        yield ''.join(template), ''.join(complement)
def write_degenerate_reference_set(input_fasta, out_path):
    """Write the degenerate forward/backward reference files for the first
    FASTA record to ``out_path`` (forward_sub<i>.txt / backward_sub<i>.txt).
    """
    # Only the first record of the FASTA is used.
    seq = ""
    for _, _, sequence in read_fasta(input_fasta):
        seq = sequence
        break

    for i, (forward, backward) in enumerate(
            make_degenerate_reference_iterator(input_sequence=seq)):
        forward_file = out_path + "forward_sub{i}.txt".format(i=i)
        backward_file = out_path + "backward_sub{i}.txt".format(i=i)
        with open(forward_file, 'w') as f:
            f.write(forward)
        with open(backward_file, 'w') as f:
            f.write(backward)
def aligner(work_queue, done_queue):
    """Worker loop: run a SignalAlignment for every argument dict taken from
    *work_queue* until the 'STOP' sentinel is seen.

    Any exception is reported through *done_queue* instead of killing the
    worker process.
    """
    try:
        for f in iter(work_queue.get, 'STOP'):
            alignment = SignalAlignment(**f)
            alignment.run()
    except Exception as e:
        # ``except X as e`` is valid on Python 2.6+ and Python 3, unlike the
        # original Python-2-only ``except X, e`` form; ``e.message`` was
        # deprecated, so stringify the exception instead.
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def concat_variant_call_files(path):
    """Concatenate every per-read ``.tsv`` under *path* into ``probs.tsv``."""
    # NOTE(review): *path* is interpolated into a shell command; fine for a
    # trusted CLI argument, unsafe for untrusted input.
    os.system("cat {path}/*.tsv > {path}/probs.tsv".format(path=path))
def main(args):
    """Drive the signal-to-reference alignment pipeline.

    Parses the command line (the *args* parameter itself is ignored and
    re-parsed via parse_args), prepares the reference and BWA index, then
    fans the fast5 files out to a pool of worker processes.
    """
    # parse args  (NOTE: re-parses sys.argv; the ``args`` argument is unused)
    args = parse_args()

    command_line = " ".join(sys.argv[:])
    print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)

    start_message = """
    # Starting Signal Align
    # Aligning files from: {fileDir}
    # Aligning to reference: {reference}
    # Aligning maximum of {nbFiles} files
    # Using model: {model}
    # Using banding: True
    # Aligning to regions in: {regions}
    # Non-default template HMM: {inThmm}
    # Non-default complement HMM: {inChmm}
    # Template HDP: {tHdp}
    # Complement HDP: {cHdp}
    """.format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files,
               inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
               tHdp=args.templateHDP, cHdp=args.complementHDP)

    print(start_message, file=sys.stdout)

    # Validate inputs before doing any work.
    if args.files_dir is None and args.fofn is None:
        print("Need to provide directory with .fast5 files of fofn", file=sys.stderr)
        sys.exit(1)

    if not os.path.isfile(args.ref):
        print("Did not find valid reference file, looked for it {here}".format(here=args.ref), file=sys.stderr)
        sys.exit(1)

    # make directory to put temporary files
    temp_folder = FolderHandler()
    temp_dir_path = temp_folder.open_folder(args.out + "tempFiles_alignment")
    reference_map = process_reference_fasta(fasta=args.ref,
                                            motif_key=args.motif_key,
                                            work_folder=temp_folder,
                                            sub_char=args.ambig_char)

    # index the reference for bwa (reuse a provided BWT index if given)
    if args.bwt is not None:
        print("signalAlign - using provided BWT %s" % args.bwt)
        bwa_ref_index = args.bwt
    else:
        print("signalAlign - indexing reference", file=sys.stderr)
        bwa_ref_index = get_bwa_index(args.ref, temp_dir_path)
        print("signalAlign - indexing reference, done", file=sys.stderr)

    # parse the target regions, if provided
    if args.target_regions is not None:
        target_regions = TargetRegions(args.target_regions)
    else:
        target_regions = None

    # setup workers for multiprocessing
    workers = args.nb_jobs
    work_queue = Manager().Queue()
    done_queue = Manager().Queue()
    jobs = []

    # list of read files, either from a file-of-filenames or a directory
    if args.fofn is not None:
        fast5s = [x for x in parse_fofn(args.fofn) if x.endswith(".fast5")]
    else:
        fast5s = [args.files_dir + x for x in os.listdir(args.files_dir) if x.endswith(".fast5")]

    # Randomly subsample when there are more reads than requested.
    nb_files = args.nb_files
    if nb_files < len(fast5s):
        shuffle(fast5s)
        fast5s = fast5s[:nb_files]

    print("[runSignalAlign]:NOTICE: Got {} files to align".format(len(fast5s)), file=sys.stdout)

    # Enqueue one alignment-argument dict per read.
    for fast5 in fast5s:
        alignment_args = {
            "reference_map": reference_map,
            "path_to_EC_refs": None,  # TODO refactor this out!
            "destination": temp_dir_path,
            "stateMachineType": args.stateMachineType,
            "bwa_index": bwa_ref_index,
            "in_templateHmm": args.in_T_Hmm,
            "in_complementHmm": args.in_C_Hmm,
            "in_templateHdp": args.templateHDP,
            "in_complementHdp": args.complementHDP,
            "output_format": args.outFmt,
            "in_fast5": fast5,
            "threshold": args.threshold,
            "diagonal_expansion": args.diag_expansion,
            "constraint_trim": args.constraint_trim,
            "target_regions": target_regions,
            "degenerate": degenerate_enum(args.degenerate),
            "twoD_chemistry": args.twoD,
        }
        #alignment = SignalAlignment(**alignment_args)
        #alignment.run()
        work_queue.put(alignment_args)

    # Start the worker processes and wait for them to drain the queue.
    for w in xrange(workers):
        p = Process(target=aligner, args=(work_queue, done_queue))
        p.start()
        jobs.append(p)
        work_queue.put('STOP')

    for p in jobs:
        p.join()

    done_queue.put('STOP')

    print("\n#  signalAlign - finished alignments\n", file=sys.stderr)
    print("\n#  signalAlign - finished alignments\n", file=sys.stdout)
if __name__ == "__main__":
    # NOTE: main() re-parses sys.argv itself; the argument is effectively
    # unused, and main() returns None so this exits with status 0.
    sys.exit(main(sys.argv))
|
ArtRand/signalAlign
|
scripts/runSignalAlign.py
|
Python
|
mit
| 10,710
|
[
"BWA"
] |
cf16e8df3f6191079f87bfa5c1d7e2d5dca83401c18014157e83f60888ca6b34
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 18:15:40 2016
@author: Radu
"""
import struct
from neuron import h
def get_net_params(tempdata_address):
    """Read the binary ``cell_params`` file under *tempdata_address*.

    The file is a flat sequence of IEEE-754 doubles; each is returned as the
    1-tuple produced by ``struct.unpack("d", ...)``, preserving the
    original return shape.

    :param tempdata_address: directory prefix (must end with a separator).
    :return: list of 1-tuples of floats.
    """
    text_file_location = tempdata_address + "cell_params"
    result = []
    # ``with`` guarantees the file is closed even if a read/unpack fails;
    # the original left it open on error.
    with open(text_file_location, 'rb') as text_file:
        value = text_file.read(8)
        while value:
            result.append(struct.unpack("d", value))
            value = text_file.read(8)
    return result
def get_tempdata_address(double_escaped=0):
    """Return the tempdata directory path for the current platform.

    NEURON's unix_mac_pc() reports 3 on Windows, where backslash paths are
    returned; *double_escaped* selects the doubly-escaped variant.
    """
    h('systype = unix_mac_pc()')
    if h.systype != 3:
        return "../tempdata/"
    if double_escaped:
        return '..\\\\..\\\\tempdata\\\\'
    return '..\\..\\tempdata\\'
def get_mn_geom_address(double_escaped=0):
    """Return the motoneuron-geometries directory path for this platform.

    Mirrors get_tempdata_address(): Windows (systype == 3) gets backslash
    paths, optionally doubly escaped.
    """
    h('systype = unix_mac_pc()')
    if h.systype != 3:
        return "mn_geometries/"
    if double_escaped:
        return '..\\\\mn_geometries\\\\'
    return '..\\mn_geometries\\'
def get_comsol_voltage(tempdata_address):
    """Read the binary ``matlab_v_extra`` file under *tempdata_address*.

    Same format as get_net_params(): a flat sequence of doubles, returned
    as a list of ``struct.unpack("d", ...)`` 1-tuples.

    :param tempdata_address: directory prefix (must end with a separator).
    :return: list of 1-tuples of floats.
    """
    text_file_location = tempdata_address + "matlab_v_extra"
    result = []
    # Close the file deterministically even on error (the original relied
    # on an explicit close that was skipped if unpacking raised).
    with open(text_file_location, 'rb') as text_file:
        value = text_file.read(8)
        while value:
            result.append(struct.unpack("d", value))
            value = text_file.read(8)
    return result
|
penguinscontrol/Spinal-Cord-Modeling
|
Python/helper_functions.py
|
Python
|
gpl-2.0
| 1,497
|
[
"NEURON"
] |
d2ff23f43385ab178fd8d7fb650416f389418ce7e1120bb0ac2aa32700921898
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a neutron diffraction (ND) pattern calculator.
"""
import json
import os
from math import asin, cos, degrees, pi, radians, sin
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .core import (
AbstractDiffractionPatternCalculator,
DiffractionPattern,
get_unique_families,
)
__author__ = "Yuta Suzuki"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Yuta Suzuki"
__email__ = "resnant@outlook.jp"
__date__ = "4/19/18"
# Module-level table: element symbol -> neutron scattering length, loaded
# once at import time from the JSON file shipped next to this module.
with open(os.path.join(os.path.dirname(__file__), "neutron_scattering_length.json")) as f:
    # This table was cited from "Neutron Data Booklet" 2nd ed (Old City 2003).
    ATOMIC_SCATTERING_LEN = json.load(f)
class NDCalculator(AbstractDiffractionPatternCalculator):
    """
    Computes the powder neutron diffraction pattern of a crystal structure.
    This code is a slight modification of XRDCalculator in
    pymatgen.analysis.diffraction.xrd. See it for details of the algorithm.
    Main changes by using neutron instead of X-ray are as follows:

    1. Atomic scattering length is a constant.
    2. Polarization correction term of Lorentz factor is unnecessary.

    Reference:
    Marc De Graef and Michael E. McHenry, Structure of Materials 2nd ed,
    Chapter13, Cambridge University Press 2003.
    """

    def __init__(self, wavelength=1.54184, symprec=0, debye_waller_factors=None):
        """
        Initializes the ND calculator with a given radiation.

        Args:
            wavelength (float): The wavelength of neutron in angstroms.
                Defaults to 1.54184, the Cu K_alpha X-ray wavelength (kept
                so ND patterns are directly comparable with the XRD
                calculator's default).
            symprec (float): Symmetry precision for structure refinement. If
                set to 0, no refinement is done. Otherwise, refinement is
                performed using spglib with provided precision.
            debye_waller_factors ({element symbol: float}): Allows the
                specification of Debye-Waller factors. Note that these
                factors are temperature dependent.
        """
        self.wavelength = wavelength
        self.symprec = symprec
        self.debye_waller_factors = debye_waller_factors or {}

    def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
        """
        Calculates the powder neutron diffraction pattern for a structure.

        Args:
            structure (Structure): Input structure
            scaled (bool): Whether to return scaled intensities. The maximum
                peak is set to a value of 100. Defaults to True. Use False if
                you need the absolute values to combine ND plots.
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.

        Returns:
            (NDPattern)
        """
        if self.symprec:
            finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
            structure = finder.get_refined_structure()

        wavelength = self.wavelength
        latt = structure.lattice
        is_hex = latt.is_hexagonal()

        # Obtained from Bragg condition. Note that reciprocal lattice
        # vector length is 1 / d_hkl.
        min_r, max_r = (
            (0, 2 / wavelength)
            if two_theta_range is None
            else [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
        )

        # Obtain crystallographic reciprocal lattice points within range
        recip_latt = latt.reciprocal_lattice_crystallographic
        recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
        if min_r:
            recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]

        # Create a flattened array of coeffs, fcoords and occus. This is
        # used to perform vectorized computation of atomic scattering factors
        # later. Note that these are not necessarily the same size as the
        # structure as each partially occupied specie occupies its own
        # position in the flattened array.
        coeffs = []
        fcoords = []
        occus = []
        dwfactors = []

        for site in structure:
            for sp, occu in site.species.items():
                try:
                    c = ATOMIC_SCATTERING_LEN[sp.symbol]
                except KeyError:
                    raise ValueError(
                        "Unable to calculate ND pattern as "
                        "there is no scattering coefficients for"
                        " %s." % sp.symbol
                    )
                coeffs.append(c)
                dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
                fcoords.append(site.frac_coords)
                occus.append(occu)

        coeffs = np.array(coeffs)
        fcoords = np.array(fcoords)
        occus = np.array(occus)
        dwfactors = np.array(dwfactors)
        peaks = {}
        two_thetas = []

        # Iterate reciprocal points by increasing |g| (then by hkl) so peaks
        # come out in 2-theta order.
        for hkl, g_hkl, ind, _ in sorted(recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
            # Force miller indices to be integers.
            hkl = [int(round(i)) for i in hkl]
            if g_hkl != 0:
                d_hkl = 1 / g_hkl

                # Bragg condition
                theta = asin(wavelength * g_hkl / 2)

                # s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
                # 1/|ghkl|)
                s = g_hkl / 2

                # Calculate Debye-Waller factor
                dw_correction = np.exp(-dwfactors * (s**2))

                # Vectorized computation of g.r for all fractional coords and
                # hkl.
                g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]

                # Structure factor = sum of atomic scattering factors (with
                # position factor exp(2j * pi * g.r and occupancies).
                # Vectorized computation.
                f_hkl = np.sum(coeffs * occus * np.exp(2j * pi * g_dot_r) * dw_correction)

                # Lorentz polarization correction for hkl
                lorentz_factor = 1 / (sin(theta) ** 2 * cos(theta))

                # Intensity for hkl is modulus square of structure factor.
                i_hkl = (f_hkl * f_hkl.conjugate()).real

                two_theta = degrees(2 * theta)

                if is_hex:
                    # Use Miller-Bravais indices for hexagonal lattices.
                    hkl = (hkl[0], hkl[1], -hkl[0] - hkl[1], hkl[2])

                # Deal with floating point precision issues by merging peaks
                # whose 2-theta falls within TWO_THETA_TOL of an existing one.
                # (NOTE: this rebinds the loop variable ``ind``.)
                ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) < self.TWO_THETA_TOL)
                if len(ind[0]) > 0:
                    peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
                    peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
                else:
                    peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)], d_hkl]
                    two_thetas.append(two_theta)

        # Scale intensities so that the max intensity is 100.
        max_intensity = max(v[0] for v in peaks.values())
        x = []
        y = []
        hkls = []
        d_hkls = []
        for k in sorted(peaks.keys()):
            v = peaks[k]
            fam = get_unique_families(v[1])
            # Drop peaks below the relative-intensity threshold.
            if v[0] / max_intensity * 100 > self.SCALED_INTENSITY_TOL:
                x.append(k)
                y.append(v[0])
                hkls.append([{"hkl": hkl, "multiplicity": mult} for hkl, mult in fam.items()])
                d_hkls.append(v[2])

        nd = DiffractionPattern(x, y, hkls, d_hkls)
        if scaled:
            nd.normalize(mode="max", value=100)
        return nd
|
materialsproject/pymatgen
|
pymatgen/analysis/diffraction/neutron.py
|
Python
|
mit
| 7,907
|
[
"CRYSTAL",
"pymatgen"
] |
81bb81cffab3890a18756af1add32db67e82bfa8ff47c002f61175aa2834b0e3
|
#!/bin/env python
#------------------------------------------------------------------------
# Copyright (c) 1998 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name: fcgi.py
#
# Description: Handles communication with the FastCGI module of the
# web server without using the FastCGI developers kit, but
# will also work in a non-FastCGI environment, (straight CGI.)
# This module was originally fetched from someplace on the
# Net (I don't remember where and I can't find it now...) and
# has been significantly modified to fix several bugs, be more
# readable, more robust at handling large CGI data and return
# document sizes, and also to fit the model that we had previously
# used for FastCGI.
#
# WARNING: If you don't know what you are doing, don't tinker with this
# module!
#
# Creation Date: 1/30/98 2:59:04PM
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
#------------------------------------------------------------------------
import os, sys, string, socket, errno
from cStringIO import StringIO
import cgi
#---------------------------------------------------------------------------
# Set various FastCGI constants (see the FastCGI 1.0 specification)

# Maximum number of requests that can be handled
FCGI_MAX_REQS=1
FCGI_MAX_CONNS = 1

# Supported version of the FastCGI protocol
FCGI_VERSION_1 = 1

# Boolean: can this application multiplex connections?
FCGI_MPXS_CONNS=0

# Record types (the "type" byte of every FastCGI record header)
FCGI_BEGIN_REQUEST = 1 ; FCGI_ABORT_REQUEST = 2 ; FCGI_END_REQUEST = 3
FCGI_PARAMS = 4 ; FCGI_STDIN = 5 ; FCGI_STDOUT = 6
FCGI_STDERR = 7 ; FCGI_DATA = 8 ; FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE

# Types of management records
ManagementTypes = [FCGI_GET_VALUES]

# Request id 0 marks management records rather than an application request.
FCGI_NULL_REQUEST_ID=0

# Masks for flags component of FCGI_BEGIN_REQUEST
FCGI_KEEP_CONN = 1

# Values for role component of FCGI_BEGIN_REQUEST
FCGI_RESPONDER = 1 ; FCGI_AUTHORIZER = 2 ; FCGI_FILTER = 3

# Values for protocolStatus component of FCGI_END_REQUEST
FCGI_REQUEST_COMPLETE = 0               # Request completed nicely
FCGI_CANT_MPX_CONN = 1                  # This app can't multiplex
FCGI_OVERLOADED = 2                     # New request rejected; too busy
FCGI_UNKNOWN_ROLE = 3                   # Role value not known

# String-exception tag used later via ``raise error, msg``.
# NOTE(review): this name is shadowed by the debugging function ``error``
# defined below -- confirm which binding the later ``raise`` should see.
error = 'fcgi.error'
#---------------------------------------------------------------------------
# The following function is used during debugging; it isn't called
# anywhere at the moment
def error(msg):
    "Append a string to /tmp/err"
    # NOTE(review): this unused debugging helper shadows the module-level
    # ``error = 'fcgi.error'`` exception tag defined above; renaming it
    # (e.g. to _debug_log) would avoid the clash.
    errf=open('/tmp/err', 'a+')
    errf.write(msg+'\n')
    errf.close()
#---------------------------------------------------------------------------
class record:
    """One FastCGI record: an 8-byte header (version, type, request id,
    content length, padding length) followed by content and padding.
    readRecord()/writeRecord() translate between the wire format and the
    attributes on this object.  (Python 2 code: strings are byte strings.)
    """
    def __init__(self):
        # Defaults for an empty record; callers fill these in before
        # writeRecord(), and readRecord() overwrites them from the wire.
        self.version = FCGI_VERSION_1
        self.recType = FCGI_UNKNOWN_TYPE
        self.reqId   = FCGI_NULL_REQUEST_ID
        self.content = ""

    #----------------------------------------
    def readRecord(self, sock):
        """Read one record from *sock* and decode type-specific fields."""
        s = map(ord, sock.recv(8))
        self.version, self.recType, paddingLength = s[0], s[1], s[6]
        self.reqId, contentLength = (s[2]<<8)+s[3], (s[4]<<8)+s[5]
        self.content = ""
        # recv() may return short reads; loop until the full content arrives.
        while len(self.content) < contentLength:
            data = sock.recv(contentLength - len(self.content))
            self.content = self.content + data
        if paddingLength != 0:
            # Padding bytes are discarded.
            padding = sock.recv(paddingLength)

        # Parse the content information
        c = self.content
        if self.recType == FCGI_BEGIN_REQUEST:
            self.role = (ord(c[0])<<8) + ord(c[1])
            self.flags = ord(c[2])

        elif self.recType == FCGI_UNKNOWN_TYPE:
            self.unknownType = ord(c[0])

        elif self.recType == FCGI_GET_VALUES or self.recType == FCGI_PARAMS:
            # Content is a sequence of FastCGI name-value pairs.
            self.values={}
            pos=0
            while pos < len(c):
                name, value, pos = readPair(c, pos)
                self.values[name] = value
        elif self.recType == FCGI_END_REQUEST:
            b = map(ord, c[0:4])
            self.appStatus = (b[0]<<24) + (b[1]<<16) + (b[2]<<8) + b[3]
            self.protocolStatus = ord(c[4])

    #----------------------------------------
    def writeRecord(self, sock):
        """Encode this record's fields and send it on *sock*."""
        content = self.content
        if self.recType == FCGI_BEGIN_REQUEST:
            content = chr(self.role>>8) + chr(self.role & 255) + chr(self.flags) + 5*'\000'

        elif self.recType == FCGI_UNKNOWN_TYPE:
            content = chr(self.unknownType) + 7*'\000'

        elif self.recType==FCGI_GET_VALUES or self.recType==FCGI_PARAMS:
            content = ""
            for i in self.values.keys():
                content = content + writePair(i, self.values[i])

        elif self.recType==FCGI_END_REQUEST:
            v = self.appStatus
            content = chr((v>>24)&255) + chr((v>>16)&255) + chr((v>>8)&255) + chr(v&255)
            content = content + chr(self.protocolStatus) + 3*'\000'

        cLen = len(content)
        eLen = (cLen + 7) & (0xFFFF - 7)    # align to an 8-byte boundary
        padLen = eLen - cLen

        hdr = [ self.version,
                self.recType,
                self.reqId >> 8,
                self.reqId & 255,
                cLen >> 8,
                cLen & 255,
                padLen,
                0]
        # string.joinfields is Python-2-only API.
        hdr = string.joinfields(map(chr, hdr), '')
        sock.sendall(hdr + content + padLen*'\000')
#---------------------------------------------------------------------------
def readPair(s, pos):
    """Decode one FastCGI name-value pair from string *s* at offset *pos*.

    Lengths < 128 are encoded in a single byte; otherwise the high bit of
    the first byte flags a 4-byte big-endian form.  Returns
    (name, value, next_pos).
    """
    def _length(idx):
        n = ord(s[idx])
        if n & 128:
            return (((n & 127) << 24) + (ord(s[idx + 1]) << 16) +
                    (ord(s[idx + 2]) << 8) + ord(s[idx + 3])), idx + 4
        return n, idx + 1

    nameLen, pos = _length(pos)
    valueLen, pos = _length(pos)
    name = s[pos:pos + nameLen]
    value = s[pos + nameLen:pos + nameLen + valueLen]
    return (name, value, pos + nameLen + valueLen)
#---------------------------------------------------------------------------
def writePair(name, value):
    """Encode a FastCGI name-value pair.

    Lengths below 128 take one byte; longer ones take four bytes with
    the high bit of the first byte set (the inverse of readPair).
    """
    def _encode_length(n):
        # Helper: encode a single length field.
        if n < 128:
            return chr(n)
        return chr(128 | (n >> 24) & 255) + chr((n >> 16) & 255) + \
               chr((n >> 8) & 255) + chr(n & 255)
    return _encode_length(len(name)) + _encode_length(len(value)) + name + value
#---------------------------------------------------------------------------
def HandleManTypes(r, conn):
    """Answer a management record on *conn*.

    Only FCGI_GET_VALUES is understood: it is answered with an
    FCGI_GET_VALUES_RESULT carrying the subset of known variables the
    peer actually asked about.
    """
    if r.recType == FCGI_GET_VALUES:
        r.recType = FCGI_GET_VALUES_RESULT
        v={}
        vars={'FCGI_MAX_CONNS' : FCGI_MAX_CONNS,
              'FCGI_MAX_REQS'  : FCGI_MAX_REQS,
              'FCGI_MPXS_CONNS': FCGI_MPXS_CONNS}
        for i in r.values.keys():
            if vars.has_key(i): v[i]=vars[i]
        # Bug fix: reply only with the requested values (v); the original
        # assigned the full vars dict, discarding the filtering just done.
        r.values=v
        r.writeRecord(conn)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
_isFCGI = 1 # assume it is until we find out for sure
def isFCGI():
    """Return true while the process appears to be running under FastCGI."""
    # Reading a module global needs no 'global' declaration.
    return _isFCGI
#---------------------------------------------------------------------------
_init = None
_sock = None
class FCGI:
def __init__(self):
self.haveFinished = 0
if _init == None:
_startup()
if not isFCGI():
self.haveFinished = 1
self.inp, self.out, self.err, self.env = \
sys.stdin, sys.stdout, sys.stderr, os.environ
return
if os.environ.has_key('FCGI_WEB_SERVER_ADDRS'):
good_addrs=string.split(os.environ['FCGI_WEB_SERVER_ADDRS'], ',')
good_addrs=map(string.strip(good_addrs)) # Remove whitespace
else:
good_addrs=None
self.conn, addr=_sock.accept()
stdin, data="", ""
self.env = {}
self.requestId=0
remaining=1
# Check if the connection is from a legal address
if good_addrs!=None and addr not in good_addrs:
raise error, 'Connection from invalid server!'
while remaining:
r=record(); r.readRecord(self.conn)
if r.recType in ManagementTypes:
HandleManTypes(r, self.conn)
elif r.reqId==0:
# Oh, poopy. It's a management record of an unknown
# type. Signal the error.
r2=record()
r2.recType=FCGI_UNKNOWN_TYPE ; r2.unknownType=r.recType
r2.writeRecord(self.conn)
continue # Charge onwards
# Ignore requests that aren't active
elif r.reqId != self.requestId and r.recType != FCGI_BEGIN_REQUEST:
continue
# If we're already doing a request, ignore further BEGIN_REQUESTs
elif r.recType == FCGI_BEGIN_REQUEST and self.requestId != 0:
continue
# Begin a new request
if r.recType == FCGI_BEGIN_REQUEST:
self.requestId = r.reqId
if r.role == FCGI_AUTHORIZER: remaining=1
elif r.role == FCGI_RESPONDER: remaining=2
elif r.role == FCGI_FILTER: remaining=3
elif r.recType == FCGI_PARAMS:
if r.content == "":
remaining=remaining-1
else:
for i in r.values.keys():
self.env[i] = r.values[i]
elif r.recType == FCGI_STDIN:
if r.content == "":
remaining=remaining-1
else:
stdin=stdin+r.content
elif r.recType==FCGI_DATA:
if r.content == "":
remaining=remaining-1
else:
data=data+r.content
# end of while remaining:
self.inp = sys.stdin = StringIO(stdin)
self.err = sys.stderr = StringIO()
self.out = sys.stdout = StringIO()
self.data = StringIO(data)
def __del__(self):
self.Finish()
def Finish(self, status=0):
if not self.haveFinished:
self.haveFinished = 1
self.err.seek(0,0)
self.out.seek(0,0)
r=record()
r.recType = FCGI_STDERR
r.reqId = self.requestId
data = self.err.read()
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r.recType = FCGI_STDOUT
data = self.out.read()
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r=record()
r.recType=FCGI_END_REQUEST
r.reqId=self.requestId
r.appStatus=status
r.protocolStatus=FCGI_REQUEST_COMPLETE
r.writeRecord(self.conn)
self.conn.close()
def getFieldStorage(self):
method = 'GET'
if self.env.has_key('REQUEST_METHOD'):
method = string.upper(self.env['REQUEST_METHOD'])
if method == 'GET':
return cgi.FieldStorage(environ=self.env, keep_blank_values=1)
else:
return cgi.FieldStorage(fp=self.inp, environ=self.env, keep_blank_values=1)
def getNextChunk(self, data):
chunk = data[:8192]
data = data[8192:]
return chunk, data
Accept = FCGI # alias for backwards compatibility
#---------------------------------------------------------------------------
def _startup():
    """Decide once whether this process runs under FastCGI.

    Under FastCGI the web server hands the process a *listening* socket
    on stdin, so getpeername() fails with ENOTCONN; any other socket
    error means a normal (CGI / command line) environment.
    """
    global _init
    _init = 1
    try:
        # Wrap stdin's descriptor as a socket object.
        s=socket.fromfd(sys.stdin.fileno(), socket.AF_INET,
                        socket.SOCK_STREAM)
        s.getpeername()
    except socket.error, (err, errmsg):
        if err!=errno.ENOTCONN:       # must be a non-fastCGI environment
            global _isFCGI
            _isFCGI = 0
            return
    global _sock
    _sock = s
#---------------------------------------------------------------------------
def _test():
    """Minimal demo application: serve requests in a loop, echoing the
    request environment (or a run of '*' when a 'size' field is given).
    Any uncaught exception is dumped to a file named 'traceback'."""
    counter=0
    try:
        while isFCGI():
            req = Accept()
            counter=counter+1
            try:
                # If the form supplies 'size', answer with that many stars.
                fs = req.getFieldStorage()
                size = string.atoi(fs['size'].value)
                doc = ['*' * size]
            except:
                # Otherwise (or on any error) build the diagnostic page.
                doc = ['<HTML><HEAD><TITLE>FCGI TestApp</TITLE></HEAD>\n<BODY>\n']
                doc.append('<H2>FCGI TestApp</H2><P>')
                doc.append('<b>request count</b> = %d<br>' % counter)
                doc.append('<b>pid</b> = %s<br>' % os.getpid())
                if req.env.has_key('CONTENT_LENGTH'):
                    cl = string.atoi(req.env['CONTENT_LENGTH'])
                    doc.append('<br><b>POST data (%s):</b><br><pre>' % cl)
                    keys = fs.keys()
                    keys.sort()
                    for k in keys:
                        val = fs[k]
                        if type(val) == type([]):
                            doc.append('    <b>%-15s :</b> %s\n' % (k, val))
                        else:
                            doc.append('    <b>%-15s :</b> %s\n' % (k, val.value))
                    doc.append('</pre>')
                doc.append('<P><HR><P><pre>')
                keys = req.env.keys()
                keys.sort()
                for k in keys:
                    doc.append('<b>%-20s :</b> %s\n' % (k, req.env[k]))
                doc.append('\n</pre><P><HR>\n')
                doc.append('</BODY></HTML>\n')
            doc = string.join(doc, '')
            req.out.write('Content-length: %s\r\n'
                          'Content-type: text/html\r\n'
                          'Cache-Control: no-cache\r\n'
                          '\r\n'
                          % len(doc))
            req.out.write(doc)
            req.Finish()
    except:
        import traceback
        f = open('traceback', 'w')
        traceback.print_exc( file = f )
#        f.write('%s' % doc)
if __name__=='__main__':
#import pdb
#pdb.run('_test()')
_test()
|
MorusAB/commonlib-fmg
|
pylib/fcgi.py
|
Python
|
agpl-3.0
| 14,933
|
[
"TINKER"
] |
f96be199958ed4dd3695af651fc4117f19cc5561ce190b52b14ee7815c536691
|
# coding: utf-8
# In[1]:
import iris
from iris.unit import Unit
from iris.cube import CubeList
from iris.exceptions import CoordinateNotFoundError, CoordinateMultiDimError
iris.FUTURE.netcdf_promote = True
iris.FUTURE.cell_datetime_objects = True
def time_coord(cube):
    """Locate the cube's time coordinate, renaming the T axis to 'time'."""
    try:
        t_axis = cube.coord(axis='T')
    except CoordinateNotFoundError:
        pass
    else:
        t_axis.rename('time')
    return cube.coord('time')
def z_coord(cube):
    """Heuristically return the dimensionless vertical coordinate.

    Falls back to the list of Z coords, preferring the last 1-D one.
    """
    try:
        return cube.coord(axis='Z')
    except CoordinateNotFoundError:
        candidates = cube.coords(axis='Z')
        chosen = candidates
        for candidate in candidates:
            if candidate.ndim == 1:
                chosen = candidate
        return chosen
def time_near(cube, datetime):
    """Return the index of the time step nearest to *datetime* (-1 on IndexError)."""
    timevar = time_coord(cube)
    try:
        numeric_time = timevar.units.date2num(datetime)
        return timevar.nearest_neighbour_index(numeric_time)
    except IndexError:
        return -1
def time_slice(cube, start, stop=None):
    """TODO: Re-write to use `iris.FUTURE.cell_datetime_objects`."""
    istart = time_near(cube, start)
    if not stop:
        # Single instant requested.
        return cube[istart, ...]
    istop = time_near(cube, stop)
    if istart == istop:
        raise ValueError('istart must be different from istop!'
                         'Got istart {!r} and '
                         ' istop {!r}'.format(istart, istop))
    return cube[istart:istop, ...]
def bbox_extract_2Dcoords(cube, bbox):
    """Extract a sub-set of a cube inside a lon, lat bounding box
    bbox=[lon_min, lat_min, lon_max, lat_max] (matching the indexing
    below: bbox[0]/bbox[2] are longitudes, bbox[1]/bbox[3] latitudes).
    NOTE: This is a work around to subset an iris cube that has
    2D lon, lat coords."""
    lons = cube.coord('longitude').points
    lats = cube.coord('latitude').points
    def minmax(v):
        return np.min(v), np.max(v)
    # Strict inequalities: points exactly on the box edge are excluded.
    inregion = np.logical_and(np.logical_and(lons > bbox[0],
                                             lons < bbox[2]),
                              np.logical_and(lats > bbox[1],
                                             lats < bbox[3]))
    region_inds = np.where(inregion)
    # Bounding index rectangle of the points inside the box.
    imin, imax = minmax(region_inds[0])
    jmin, jmax = minmax(region_inds[1])
    return cube[..., imin:imax+1, jmin:jmax+1]
def intersection(cube, bbox):
    """Sub sets cube with 1D or 2D lon, lat coords.
    Using `intersection` instead of `extract` we deal with 0-360
    longitudes automagically."""
    try:
        subset = cube.intersection(longitude=(bbox[0], bbox[2]),
                                   latitude=(bbox[1], bbox[3]))
        method = "Using iris `cube.intersection`"
    except CoordinateMultiDimError:
        # 2-D lon/lat coords: fall back to the manual work-around.
        subset = bbox_extract_2Dcoords(cube, bbox)
        method = "Using iris `bbox_extract_2Dcoords`"
    print(method)
    return subset
def get_cube(url, name_list=None, bbox=None, time=None, units=None):
    """Load a cube from *url*, optionally filtering by standard name,
    bounding box, time (instant or (start, stop)) and units."""
    cubes = iris.load_raw(url)
    if name_list:
        wanted = [cube for cube in cubes if cube.standard_name in name_list]
        cubes = CubeList(wanted)
        if not cubes:
            raise ValueError('Cube does not contain {!r}'.format(name_list))
        cube = cubes.merge_cube()
    if bbox:
        cube = intersection(cube, bbox)
    if time:
        if isinstance(time, datetime):
            start, stop = time, None
        elif isinstance(time, tuple):
            start, stop = time[0], time[1]
        else:
            raise ValueError('Time must be start or (start, stop).'
                             ' Got {!r}'.format(time))
        cube = time_slice(cube, start, stop)
    if units:
        if not cube.units == units:
            cube.convert_units(units)
    return cube
# In[2]:
import time
import contextlib
@contextlib.contextmanager
def timeit(log=None):
    """Context manager reporting wall-clock time of the block as HH:MM:SS."""
    started = time.time()
    yield
    elapsed = time.strftime("%H:%M:%S", time.gmtime(time.time() - started))
    # Route to the logger when one is given, otherwise to stdout.
    sink = log.info if log else print
    sink(elapsed)
# In[3]:
get_ipython().magic('matplotlib inline')
import numpy as np
import numpy.ma as ma
import iris.quickplot as qplt
import matplotlib.pyplot as plt
def plot_surface(cube, model=''):
    """Plot the surface level of *cube* with a model/time/variable title."""
    z = z_coord(cube)
    # 'positive: up' means the largest z value is the surface.
    if z.attributes.get('positive', None) == 'up':
        idx = np.argmax(z.points)
    else:
        idx = np.argmin(z.points)
    surface = cube[idx, ...]
    surface.data = ma.masked_invalid(surface.data)
    tcoord = time_coord(cube)
    when = tcoord.units.num2date(tcoord.points)[0]
    qplt.pcolormesh(surface)
    plt.title('{}: {}\nVariable: {} level: {}'.format(model, when,
                                                      surface.name(), idx))
# In[4]:
print(iris.__version__)
print(iris.__file__)
# In[5]:
from datetime import datetime, timedelta
start = datetime.utcnow() - timedelta(days=7)
stop = datetime.utcnow()
name_list = ['sea_water_potential_temperature', 'sea_water_temperature']
bbox = [-76.4751, 38.3890, -71.7432, 42.9397]
units = Unit('Kelvin')
# In[6]:
model = 'MARACOOS/ESPRESSO'
url = 'http://tds.marine.rutgers.edu/thredds/dodsC/roms/espresso/2009_da/his'
with timeit():
cube = get_cube(url, name_list=name_list, bbox=bbox,
time=start, units=units)
plot_surface(cube, model)
# In[7]:
model = 'USGS/COAWST'
url = 'http://geoport.whoi.edu/thredds/dodsC/coawst_4/use/fmrc/'
url += 'coawst_4_use_best.ncd'
with timeit():
cube = get_cube(url, name_list=name_list, bbox=bbox,
time=start, units=units)
plot_surface(cube, model)
# In[8]:
model = 'HYCOM'
url = 'http://ecowatch.ncddc.noaa.gov/thredds/dodsC/hycom/hycom_reg1_agg/'
url += 'HYCOM_Region_1_Aggregation_best.ncd'
with timeit():
cube = get_cube(url, name_list=name_list, bbox=bbox,
time=start, units=units)
plot_surface(cube, model)
# In[9]:
model = 'NYHOP'
url = 'http://colossus.dl.stevens-tech.edu/thredds/dodsC/fmrc/NYBight/'
url += 'NYHOPS_Forecast_Collection_for_the_New_York_Bight_best.ncd'
with timeit():
cube = get_cube(url, name_list=name_list, bbox=bbox,
time=start, units=units)
plot_surface(cube, model)
# In[10]:
model = 'RUTGERS/NWA'
url = 'http://oceanus.esm.rutgers.edu:8090/thredds/dodsC/ROMS/NWA/Run03/Output'
with timeit():
cube = get_cube(url, name_list=name_list, bbox=bbox,
time=start, units=units)
plot_surface(cube, model)
|
rsignell-usgs/notebook
|
iris_snippets.py
|
Python
|
mit
| 6,452
|
[
"ESPResSo"
] |
a72251377092f5b2e477711bd06bade74479d0222391a35d421473b8e3acf908
|
'''
Created on 19/02/2010
@author: jose
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
from Bio import SeqIO
from franklin.utils.cmd_utils import create_runner
from franklin.seq.alignment_result import (filter_alignments,
get_alignment_parser)
from franklin.seq.seq_analysis import (infer_introns_for_cdna,
similar_sequences_for_blast)
from franklin.seq.readers import guess_seq_file_format
from franklin.snv.snv_annotation import (calculate_maf_frequency,
snvs_in_window, calculate_snv_kind,
calculate_cap_enzymes,
variable_in_groupping,
invariant_in_groupping,
_get_group, SNP,
SNV_TYPES)
from franklin.seq.seqs import get_seq_name
# In a filter TRUE result means that a snv does NOT pass the filter.
# So it writes it to the vcf
FILTER_DESCRIPTIONS = {
'uniq_contiguous':
{'id': 'UCR',
'description':'Region is not unique or non contiguous'},
'close_to_intron':
{'id': 'I%2d',
'description':'An intron is located closer than %2d base pairs'},
'high_variable_reg':
{'id': 'HVR%d',
'description':'The region has more than %d snvs per 100 bases'},
'close_to_snv':
{'id':'cs%s%2d%s',
'description':'The snv is closer than %d nucleotides to another %s%s'},
'close_to_limit':
{'id':'cl%2d',
'description':'The snv is closer than %d nucleotides the reference edge'},
'maf':
{'id':'maf%d',
'description':
'The most frequent allele in %s: %s. frequency greater than %.2f'},
'by_kind':
{'id':'vk%s',
'description':'It is not an %s'},
'cap_enzymes':
{'id':'ce%s',
'description':'SNV is not a CAP detectable by the enzymes: %s'},
'is_variable':
{'id':'v%s%i',
'description':'It is not variable, or no data, in the %s : %s. All together: %s'},
'is_not_variable':
{'id':'nv%s%i',
'description':'It is variable, or no data, in the %s : %s. All together: %s'},
'ref_not_in_list':
{'id':'rnl',
'description':'Filters by given list of seq names'},
'min_groups':
{'id':'m%s%i',
'description':'SNV read in less than %i %s'},
'in_segment_bed':
{'id':'is%i',
'description':'The snv is outside the given segments reduced in %i bases from each edge'},
}
class SnvNamer(object):
    '''It gives names to the snvs.

    For every snv filter it builds a short VCF-style id and a human
    readable description from the FILTER_DESCRIPTIONS templates, caching
    repeated (filter_name, parameters) pairs in the filter_descriptions
    dict handed in by the caller.
    '''
    def __init__(self):
        'It inits the objects'
        # Per description-template counters, used to number the maf and
        # is_variable/is_not_variable filters uniquely.
        self._filter_counts = {}
    def get_filter_description(self, filter_name, parameters,
                               filter_descriptions):
        'It returns the short id and the description'
        # Memoized result for this exact filter/parameters combination.
        if (filter_name, parameters) in filter_descriptions:
            return filter_descriptions[filter_name, parameters]
        id_ = FILTER_DESCRIPTIONS[filter_name]['id']
        desc = FILTER_DESCRIPTIONS[filter_name]['description']
        # Dispatch to the filter-specific formatter when one exists.
        if filter_name == 'by_kind':
            short_name, description = self._get_nd_kind(id_, desc, parameters)
        elif filter_name == 'cap_enzymes':
            short_name, description = self._get_nd_ce(id_, desc, parameters)
        elif filter_name == 'is_variable':
            short_name, description = self._get_nd_iv(id_, desc, parameters)
        elif filter_name == 'is_not_variable':
            short_name, description = self._get_nd_iv(id_, desc, parameters)
        elif filter_name == 'high_variable_reg':
            short_name, description = self._get_nd_hvr(id_, desc, parameters)
        elif filter_name == 'min_groups':
            short_name, description = self._get_min_groups_desc(id_, desc,
                                                                parameters)
        elif filter_name == 'maf':
            short_name, description = self._get_nd_maf(id_, desc, parameters)
        elif filter_name == 'close_to_snv':
            short_name, description = self._get_nd_cs(id_, desc, parameters)
        elif filter_name == 'in_segment_bed':
            short_name, description = self._get_nd_is(id_, desc, parameters)
        else:
            # Generic case: interpolate parameters when the template asks.
            if '%' in id_:
                short_name = id_ % parameters
            else:
                short_name = id_
            if '%' in desc:
                description = desc % parameters
            else:
                description = desc
        filter_descriptions[filter_name, parameters] = short_name, description
        return short_name, description
    @staticmethod
    def _get_nd_cs(id_, desc, parameters):
        'It returns the name and id of the close to snv filter'
        # parameters is (limit, snv_type[, maf])
        limit = parameters[0]
        snv_type = parameters[1]
        maf = parameters[2] if len(parameters)> 2 else None
        if snv_type is not None:
            snv_type = SNV_TYPES[snv_type]
        if snv_type is None:
            fist_letter = ''
            snv_type_name = 'all'
        else:
            fist_letter = snv_type[0]
            snv_type_name = snv_type
        if maf:
            maf_name_str = '_%2f' % maf
            maf_desc_str = ', with maf:%2f' % maf
        else:
            maf_name_str = ''
            maf_desc_str = ''
        short_name = id_ % (fist_letter, limit, maf_name_str)
        description = desc % (limit, snv_type_name, maf_desc_str)
        return short_name, description
    def _get_nd_maf(self, id_, desc, parameters):
        'It returns the name and id of the maf filter'
        # Number this maf filter instance so repeated uses get unique ids.
        filter_counts = self._filter_counts
        if desc not in filter_counts:
            filter_counts[desc] = 0
        filter_counts[desc] += 1
        #print parameters
        short_name = id_ % filter_counts[desc]
        # parameters may be (frequency,) or (frequency, groups, kind).
        if isinstance(parameters, tuple) and len(parameters) > 1:
            groups = ','.join(parameters[1])
            kind = parameters[2]
            description = desc % (kind, groups, parameters[0])
        else:
            if isinstance(parameters, tuple):
                param = parameters[0]
            else:
                param = parameters
            description = desc % ('All', 'All', param)
        return short_name, description
    @staticmethod
    def _get_min_groups_desc(id_, desc, parameters):
        'It returns the name and id of the snv filter for min_groups'
        # parameters is (min_group_num, group_kind)
        group_letter = parameters[1][0]
        min_group_num = parameters[0]
        short_name = id_ % (group_letter, min_group_num)
        description = desc % (min_group_num, parameters[1])
        return short_name, description
    @staticmethod
    def _get_nd_hvr(id_, desc, parameters):
        'It returns the name and id of the snv filter for high_variable_reg'
        # Variability fraction expressed as snvs per 100 bases.
        number = int(parameters[0] * 100)
        short_name = id_ % number
        description = desc % number
        return short_name, description
    def _get_nd_iv(self, id_, desc, parameters):
        'It returns the name and id of the snv filter for by is_variable filter'
        filter_counts = self._filter_counts
        if desc not in filter_counts:
            filter_counts[desc] = 0
        filter_counts[desc] += 1
        groups = {'libraries':'lb', 'read_groups':'rg', 'samples':'sm'}
        short_name = id_ % (groups[parameters[0]], filter_counts[desc])
        groups = ','.join(parameters[1])
        description = desc % (parameters[0], groups, parameters[2])
        # Append maf/min_num_reads details when present; their position in
        # the tuple differs between the two filter factories.
        if len(parameters)>=7 and parameters[5] and parameters[6]:
            description1 = ('. maf:%f' % parameters[5] + '. '
                            'min_num_reads:%i' % parameters[6])
            description += description1
        elif len(parameters)==6 and parameters[4] and parameters[5]:
            description1 = ('. maf:%f' % parameters[4] + '. '
                            'min_num_reads:%i' % parameters[5])
            description += description1
        return short_name, description
    @staticmethod
    def _get_nd_kind(id_, desc, parameters):
        'It returns the name and id of the snv filter for by kind filter'
        vkinds = {0:'snp', 1:'insertion', 2:'deletion', 3:'invariant',
                  4:'indel', 5:'complex'}
        kind = vkinds[parameters]
        short_name = id_ % kind[0]
        description = desc % kind
        return short_name, description
    @staticmethod
    def _get_nd_ce(id_, desc, parameters):
        'It returns the name and id of the snv filter for cap_enzyme filter'
        # parameters is the boolean all_enzymes flag.
        if parameters:
            enzymes = 'all'
            booltag = 't'
        else:
            enzymes = 'cheap ones'
            booltag = 'f'
        short_name = id_ % booltag
        description = desc % enzymes
        return short_name, description
    @staticmethod
    def _get_nd_is(id_, desc, parameters):
        'It returns the name and id of the snv filter for in_segment filter'
        # parameters is the edge_avoidance distance (may be None/0).
        if parameters:
            edge_avoidance = parameters
        else:
            edge_avoidance = 0
        short_name = id_ % edge_avoidance
        description = desc % edge_avoidance
        return short_name, description
def _add_filter_result(snv, filter_name, result, threshold=None):
'It adds the filter to the SeqFeature qualifiers'
qualifiers = snv.qualifiers
if 'filters' not in qualifiers:
qualifiers['filters'] = {}
if filter_name not in qualifiers['filters']:
qualifiers['filters'][filter_name] = {}
qualifiers['filters'][filter_name][threshold] = result
def _get_filter_result(snv, filter_name, threshold=None):
'It gets the result of a filter. Returns None if the filter is not done'
qualifiers = snv.qualifiers
if 'filters' not in qualifiers:
return None
try:
result = qualifiers['filters'][filter_name][threshold]
return result
except KeyError:
return None
def create_reference_in_list_filter(seq_list):
    '''It filters sequences looking in a list. If the sequence is in a list it
    passes the filter'''
    def reference_in_list_filter(sequence):
        "The filter"
        if sequence is None:
            return None
        # One result per sequence, stamped on every snv not yet annotated.
        result = get_seq_name(sequence) in seq_list
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'ref_not_in_list') is not None:
                continue
            _add_filter_result(snv, 'ref_not_in_list', result)
        return sequence
    return reference_in_list_filter
def create_unique_contiguous_region_filter(distance, genomic_db,
                                           genomic_seqs_fpath):
    '''It returns a filter that removes snv in a region that give more than one
    match or more than one match_parts.

    The region is the +/- *distance* window around each snv; it is blasted
    against *genomic_db* and, when a single hit is found, re-aligned with
    est2genome to check the match is contiguous (no introns).
    '''
    parameters = {'database': genomic_db}
    blast_runner = create_runner(tool='blastn', parameters=parameters)
    blast_parser = get_alignment_parser('blast')
    # Keep only decent blast hits: >=90% similarity and >=20 query residues.
    match_filters = [{'kind' : 'score_threshold',
                      'score_key': 'similarity',
                      'min_score': 90,
                     },
                     {'kind' : 'min_length',
                      'min_num_residues': 20,
                      'length_in_query' : True
                     }
                    ]
    if not genomic_seqs_fpath:
        msg = 'No genomic sequence file defined for unique SNV filter'
        raise ValueError(msg)
    if not genomic_db:
        msg = 'No genomic blast database defined for unique SNV filter'
        raise ValueError(msg)
    genomic_seqs_fhand = open(genomic_seqs_fpath)
    genomic_seqs_index = SeqIO.index(genomic_seqs_fhand.name,
                                     guess_seq_file_format(genomic_seqs_fhand))
    def unique_contiguous_region_filter(sequence):
        '''It filters out the snv in regions repeated in the genome or
        discontiguous'''
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            # Check if it is already done
            previous_result = _get_filter_result(snv, 'uniq_contiguous',
                                                 threshold=distance)
            if previous_result is not None:
                continue
            #we make a blast
            #with the sequence around the snv
            location = snv.location.start.position
            start = location - distance
            end = location + distance
            if start < 0:
                start = 0
            #print start, end
            seq_fragment = sequence[start:end]
            blast_fhand = blast_runner(seq_fragment)['blastn']
            #now we parse the blast
            blast_result = blast_parser(blast_fhand)
            alignments = filter_alignments(blast_result, config=match_filters)
            #are there any similar sequences?
            try:
                alignment = alignments.next()
                result = True
            except StopIteration:
                #if there is no similar sequence we assume that is unique
                result = False
            if result:
                #how many matches, it should be only one
                num_hits = len(alignment['matches'])
                if num_hits > 1:
                    result = True
                else:
                    #how many match parts have the first match?
                    #we could do it with the blast result, but blast is not very
                    #good aligning, so we realign with est2genome
                    blast_fhand.seek(0)
                    sim_seqs = similar_sequences_for_blast(blast_fhand)
                    sim_seq = sim_seqs[0] if sim_seqs else None
                    introns = infer_introns_for_cdna(sequence=seq_fragment,
                                          genomic_seqs_index=genomic_seqs_index,
                                          similar_sequence=sim_seq,
                                          genomic_db=genomic_db)
                    if introns:
                        result = True
                    else:
                        result = False
            blast_fhand.close()
            _add_filter_result(snv, 'uniq_contiguous', result, distance)
        return sequence
    return unique_contiguous_region_filter
def create_close_to_intron_filter(distance):
    '''It returns a filter that filters snv by the proximity to introns.
    The introns should have been annotated before.'''
    def close_to_intron_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'close_to_intron',
                                  threshold=distance) is not None:
                continue
            pos = snv.location.start.position
            result = any(abs(pos - intron.location.start.position) < distance
                         for intron in sequence.get_features(kind='intron'))
            _add_filter_result(snv, 'close_to_intron', result,
                               threshold=distance)
        return sequence
    return close_to_intron_filter
def create_high_variable_region_filter(max_variability, window=0):
    'It returns a filter that filters snvs by region variability.'
    # window == 0 means "use the whole sequence".
    if window == 0:
        window = None
    def high_variable_region_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        snvs = list(sequence.get_features(kind='snv'))
        threshold = (max_variability, window)
        for snv in snvs:
            if _get_filter_result(snv, 'high_variable_reg',
                                  threshold=threshold) is not None:
                continue
            if window is None:
                num_snvs, span = len(snvs), len(sequence)
            else:
                num_snvs, span = snvs_in_window(snv, snvs, window), window
            result = num_snvs / float(span) > max_variability
            _add_filter_result(snv, 'high_variable_reg', result,
                               threshold=threshold)
        return sequence
    return high_variable_region_filter
def create_close_to_snv_filter(distance, snv_type=None, maf=None):
    '''It returns a filter that filters snv by the distance to other snvs.
    If the snv has another snv closer than DISTANCE, then this snv is
    filtered out'''
    threshold = (distance, snv_type, maf)
    def close_to_snv_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        snvs = list(sequence.get_features(kind='snv'))
        for snv in snvs:
            if _get_filter_result(snv, 'close_to_snv',
                                  threshold=threshold) is not None:
                continue
            neighbours = snvs_in_window(snv, snvs, distance * 2, snv_type, maf)
            _add_filter_result(snv, 'close_to_snv', neighbours >= 1,
                               threshold=threshold)
        return sequence
    return close_to_snv_filter
def create_snv_close_to_limit_filter(distance):
    '''This function is a function factory. This function is a filter than
    return true if those snvs are or not closer to the limit than max_distance
    '''
    def snv_close_to_reference_limit(sequence):
        '''True if the snv variation is close to the contig limits.
        It checks if the snv is not within the range covered by the
        consensus and if it's close to one of its limits. In both cases it will
        return True.
        '''
        if sequence is None:
            return None
        seq_length = len(sequence)
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'close_to_limit',
                                  threshold=distance) is not None:
                continue
            pos = int(snv.location.start.position)
            result = pos < distance or pos + distance > seq_length
            _add_filter_result(snv, 'close_to_limit', result,
                               threshold=distance)
        return sequence
    return snv_close_to_reference_limit
def create_major_allele_freq_filter(frequency, groups=None, group_kind=None):
    'It filters the snv in a seq by the frequency of the more frequent allele'
    if groups is None:
        parameters = (frequency,)
    else:
        parameters = (frequency, tuple(groups), group_kind)
    def major_allele_freq_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'maf',
                                  threshold=parameters) is not None:
                continue
            maf = calculate_maf_frequency(snv, groups=groups,
                                          group_kind=group_kind)
            # No maf data counts as filtered out, as does a too-frequent allele.
            result = maf is None or maf > frequency
            _add_filter_result(snv, 'maf', result, threshold=parameters)
        return sequence
    return major_allele_freq_filter
def create_kind_filter(kind):
    'It filters out the snvs with a different kind'
    def kind_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'by_kind', threshold=kind) is not None:
                continue
            _add_filter_result(snv, 'by_kind',
                               calculate_snv_kind(snv) != kind,
                               threshold=kind)
        return sequence
    return kind_filter
def create_cap_enzyme_filter(all_enzymes):
    'It filters the snv looking if it is detectable by restriction enzymes'
    def cap_enzyme_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'cap_enzymes',
                                  threshold=all_enzymes) is not None:
                continue
            enzymes = calculate_cap_enzymes(snv, sequence,
                                            all_enzymes=all_enzymes)
            # No detecting enzyme -> the snv is filtered out (True).
            _add_filter_result(snv, 'cap_enzymes', len(enzymes) == 0,
                               threshold=all_enzymes)
        return sequence
    return cap_enzyme_filter
def create_not_variable_in_group_filter(group_kind, groups, in_union=True,
                                        reference_free=True, maf=None,
                                        min_num_reads=None):
    '''It filters looking if the list of reads is variable in the given
    conditions.'''
    # Normalize a single group name to a tuple.
    groups = (groups,) if isinstance(groups, basestring) else tuple(groups)
    parameters = (group_kind, groups, in_union, reference_free, maf,
                  min_num_reads)
    def is_not_variable_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'is_not_variable',
                                  threshold=parameters) is not None:
                continue
            invariant = invariant_in_groupping(snv, group_kind, groups,
                                               in_union, in_all_groups=False,
                                               reference_free=reference_free,
                                               maf=maf,
                                               min_num_reads=min_num_reads)
            _add_filter_result(snv, 'is_not_variable', not invariant,
                               threshold=parameters)
        return sequence
    return is_not_variable_filter
def create_is_variable_filter(group_kind, groups, in_union=True,
                              in_all_groups=True, reference_free=True,
                              maf=None, min_num_reads=None,
                              min_reads_per_allele = None):
    '''It filters looking if the list of reads is variable in the given
    conditions.'''
    # Normalize a single group name to a tuple.
    groups = (groups,) if isinstance(groups, basestring) else tuple(groups)
    parameters = (group_kind, groups, in_union,in_all_groups, reference_free,
                  maf, min_num_reads, min_reads_per_allele)
    def is_variable_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'is_variable',
                                  threshold=parameters) is not None:
                continue
            variable = variable_in_groupping(snv, group_kind, groups,
                                          in_union=in_union,
                                          in_all_groups=in_all_groups,
                                          reference_free=reference_free,
                                          maf=maf,
                                          min_num_reads=min_num_reads,
                                          min_reads_per_allele=min_reads_per_allele)
            _add_filter_result(snv, 'is_variable', not variable,
                               threshold=parameters)
        return sequence
    return is_variable_filter
def create_min_groups_filter(min_groups, group_kind='read_groups'):
    'It filters snvs read in less groups (samples) than the min number given'
    parameters = (min_groups, group_kind)
    def min_groups_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        for snv in sequence.get_features(kind='snv'):
            if _get_filter_result(snv, 'min_groups',
                                  threshold=parameters) is not None:
                continue
            # Collect every group represented among the alleles' reads.
            rg_info = snv.qualifiers['read_groups']
            seen = set()
            for allele in snv.qualifiers['alleles'].values():
                for read_group in allele['read_groups'].keys():
                    seen.add(_get_group(read_group, group_kind, rg_info))
            _add_filter_result(snv, 'min_groups', len(seen) < min_groups,
                               threshold=parameters)
        return sequence
    return min_groups_filter
def _inside_segment_filter(sequence, segments, edge_avoidance, filter_name=None):
    'It filters and annotates inside the snv the result'
    # NOTE(review): this walk assumes *segments* is sorted by start and the
    # snvs come back sorted too (get_sorted_features) -- TODO confirm.
    filter_name = 'in_segment_bed' if filter_name is None else filter_name
    index = 0
    for snv in sequence.get_sorted_features(kind='snv'):
        previous_result = _get_filter_result(snv, filter_name,
                                             threshold=edge_avoidance)
        if previous_result is not None:
            continue
        # With no segments every snv is outside (True = filtered out).
        result = None if segments else True
        snv_start = snv.location.start.position
        snv_end = snv.location.end.position
        while result is None:
            if snv_end > segments[-1][1] - edge_avoidance:
                # Past the last (shrunk) segment: outside.
                result = True
            elif snv_end < segments[index][0] + edge_avoidance:
                # Before the current segment begins: outside.
                result = True
            elif ((snv_end <= segments[index][1] - edge_avoidance) and
                  (snv_start >= segments[index][0])):
                # Fully within the current segment: inside (passes).
                result = False
            elif ((snv_end <= segments[index][1] - edge_avoidance) and not
                  (snv_start >= segments[index][0])):
                # Straddles the segment's left edge: outside.
                result = True
            else:
                # Beyond this segment: try the next one.
                index += 1
        _add_filter_result(snv, filter_name, result, threshold=edge_avoidance)
    return sequence
def _get_segments_from_bed(fpath):
'It parses the bed file and converts it in segments'
segments = {}
for line in open(fpath):
line = line.strip()
if not line:
continue
name, start, end = line.split('\t')
if name not in segments:
segments[name] = []
segments[name].append((int(start), int(end)))
return segments
def create_in_segment_bed_filter(bed_fpath, edge_avoidance=None):
    '''It checks if the snv is inside (False) or outside (True) of the segment.
    It calculates the segments using the bed file.

    Convenience wrapper: parses the bed file once and delegates to
    create_in_segment_filter with the 'in_segment_bed' filter name.
    '''
    segments = _get_segments_from_bed(bed_fpath)
    return create_in_segment_filter(segments, edge_avoidance, 'in_segment_bed')
def create_in_segment_filter(segments, edge_avoidance=None, filter_name=None):
    '''It checks if the snv is inside (False) or outside (True) of the segment.

    segments maps sequence names to sorted lists of (start, end) tuples;
    edge_avoidance shrinks every segment on both sides (default 0).
    '''
    edge_avoidance = 0 if edge_avoidance is None else edge_avoidance
    def in_segment_filter(sequence):
        'The filter'
        if sequence is None:
            return None
        # sequences with no segments get an empty list (everything filtered)
        seq_segments = segments.get(sequence.name, [])
        # BUGFIX: return the annotated sequence, not the helper function
        # object; every other filter in this module returns the sequence so
        # that filters can be chained.
        return _inside_segment_filter(sequence, seq_segments, edge_avoidance,
                                      filter_name)
    return in_segment_filter
|
JoseBlanca/franklin
|
franklin/snv/snv_filters.py
|
Python
|
agpl-3.0
| 29,261
|
[
"BLAST"
] |
ba0a81f197b7b37be82d4c0517ac05f3d0ae21492412ecc996167831123066ab
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, sys
import subprocess
import json
import unittest
from FactorySystem import Parser
import pyhit
import mooseutils
def run_app(args=None):
    """
    Run the app with the given extra arguments and return its stdout.

    Exits the interpreter if the app failed to run for any reason.
    """
    # Copy the argument list: the original used a mutable default ([]) and
    # mutated it with insert(), so arguments accumulated across calls and
    # the caller's list was modified.
    args = [] if args is None else list(args)
    app_name = mooseutils.find_moose_executable_recursive()
    args.insert(0, app_name)
    # "-options_left 0" is used to stop the debug version of PETSc from printing
    # out WARNING messages that sometime confuse the json parser
    args.insert(1, "-options_left")
    args.insert(2, "0")
    cmd_line = ' '.join(args)
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError as e:
        print("Problem running '%s'\nError: %s" % (cmd_line, e))
        sys.exit(1)
    data = proc.communicate()
    stdout_data = data[0].decode("utf-8")
    if proc.returncode != 0:
        print("Failed with exit code %s" % proc.returncode)
        sys.exit(proc.returncode)
    return stdout_data
class TestJSONBase(unittest.TestCase):
    """
    Make sure the Json dump produces valid Json
    and has the expected structure.
    """

    def getJsonData(self, extra=None):
        """Run the app with --json (plus *extra* args) and return the parsed dict."""
        # avoid the shared mutable default 'extra=[]'
        args = ["--json"] + (extra or [])
        output = run_app(args)
        self.assertIn("**START JSON DATA**\n", output)
        self.assertIn("**END JSON DATA**\n", output)
        start_json_string = '**START JSON DATA**\n'
        start_pos = output.find(start_json_string)
        self.assertGreater(start_pos, -1)
        end_pos = output.find('**END JSON DATA**')
        self.assertGreater(end_pos, -1)
        output = output[start_pos + len(start_json_string):end_pos]
        data = json.loads(output)
        return data

    def check_basic_json(self, data):
        """Common structural assertions on the 'blocks' section of the dump."""
        self.assertIn("Executioner", data)
        self.assertIn("BCs", data)
        bcs = data["BCs"]
        periodic = bcs["subblocks"]["Periodic"]
        self.assertEqual(periodic["star"]["subblock_types"], None)
        self.assertIn("DirichletBC", bcs["star"]["subblock_types"])
        self.assertNotIn("types", bcs)
        exe = data["Executioner"]
        self.assertIn("types", exe)
        self.assertIn("Transient", exe["types"])
        self.assertNotIn("subblock_types", exe)
        params = exe["actions"]["CreateExecutionerAction"]["parameters"]
        self.assertEqual(params["active"]["cpp_type"], "std::vector<std::string>")
        self.assertEqual(params["active"]["basic_type"], "Array:String")
        self.assertEqual(params["type"]["cpp_type"], "std::string")
        self.assertEqual(params["type"]["basic_type"], "String")
        # Preconditioning has a Preconditioning/*/* syntax which is unusual
        self.assertIn("Preconditioning", data)
        p = data["Preconditioning"]
        split = p["star"]["star"]["subblock_types"]["Split"]
        self.assertIn("petsc_options", split["parameters"])
        self.assertIn("splitting_type", split["parameters"])
        f = data["Functions"]["star"]
        self.assertIn("associated_types", f)
        # assertEquals was a deprecated alias, removed in Python 3.12
        self.assertEqual(["FunctionName"], f["associated_types"])
        self.assertEqual(f["subblock_types"]["ParsedFunction"]["class"], "MooseParsedFunction")
        self.assertEqual(f["subblock_types"]["ParsedFunction"]["label"], "MooseApp")
        a = data["Adaptivity"]
        i = a["subblocks"]["Indicators"]["star"]["subblock_types"]["AnalyticalIndicator"]
        self.assertIn("all", i["parameters"]["outputs"]["reserved_values"])
        self.assertIn("none", i["parameters"]["outputs"]["reserved_values"])

    def getBlockSections(self, node):
        """Map path -> child for the Section children of *node*."""
        # NOTE(review): 'hit' is not imported in this file; presumably pyhit
        # provides it at runtime -- confirm before relying on these helpers.
        return {c.path(): c for c in node.children(node_type=hit.NodeType.Section)}

    def getBlockParams(self, node):
        """Map path -> child for the Field (parameter) children of *node*."""
        return {c.path(): c for c in node.children(node_type=hit.NodeType.Field)}

    def getInputFileFormat(self, extra=None):
        """
        Does a dump and uses the GetPotParser to parse the output.
        """
        args = ["--disable-refcount-printing", "--dump"] + (extra or [])
        output = run_app(args)
        self.assertIn("### START DUMP DATA ###\n", output)
        self.assertIn("### END DUMP DATA ###\n", output)
        output = output.split('### START DUMP DATA ###\n')[1]
        output = output.split('### END DUMP DATA ###')[0]
        self.assertNotEqual(len(output), 0)
        root = pyhit.parse(output)
        errors = list(Parser.checkDuplicates(root))
        self.assertEqual(errors, [])
        return root
class TestFull(TestJSONBase):

    def testFullJson(self):
        """
        Basic checks that the expected data is present in the expected
        locations of a full --json dump.
        """
        dump = self.getJsonData()
        self.assertIn("active", dump["global"]["parameters"])
        blocks = dump["blocks"]
        self.check_basic_json(blocks)
        # the default dump must include the test objects
        self.assertIn("ApplyInputParametersTest", blocks)
        pp_func = blocks["Functions"]["star"]["subblock_types"]["PostprocessorFunction"]
        self.assertEqual(pp_func["label"], "MooseTestApp")
class TestNoTestObjects(TestJSONBase):

    def testNoTestObjects(self):
        """With --disallow-test-objects the test objects must disappear."""
        dump = self.getJsonData(["--disallow-test-objects"])
        self.assertIn("active", dump["global"]["parameters"])
        blocks = dump["blocks"]
        self.check_basic_json(blocks)
        self.assertNotIn("ApplyInputParametersTest", blocks)
class TestSearch(TestJSONBase):
    def testJsonSearch(self):
        """
        Make sure parameter search works.

        '--json <term>' restricts the dump to entries matching <term>;
        both parameter names and block names should match.
        """
        all_data = self.getJsonData(["initial_marker"])
        # a filtered dump has no 'global' section
        self.assertNotIn("global", all_data)
        data = all_data["blocks"]
        self.assertNotIn("Executioner", data)
        self.assertNotIn("BCs", data)
        self.assertIn("Adaptivity", data)
        # only the single matching block survives the search
        self.assertEqual(len(data.keys()), 1)
        params = data["Adaptivity"]["actions"]["SetAdaptivityOptionsAction"]["parameters"]
        self.assertIn("initial_marker", params)
        self.assertEqual(len(params.keys()), 1)
        # test to make sure it matches blocks as well
        all_data = self.getJsonData(["diffusion"])
        data = all_data["blocks"]
        self.assertEqual(len(data.keys()), 2)
        self.assertIn("BadKernels", data)
        self.assertIn("Kernels", data)
        diff = data["Kernels"]["star"]["subblock_types"]["Diffusion"]
        self.assertIn("use_displaced_mesh", diff["parameters"])
class TestLineInfo(TestJSONBase):
    def testLineInfo(self):
        """
        Make sure file/line information works: each action and task in the
        dump should report exactly one source file with a positive line.
        """
        all_data = self.getJsonData()
        data = all_data["blocks"]
        adapt = data["Adaptivity"]["actions"]["SetAdaptivityOptionsAction"]
        fi = adapt["file_info"]
        self.assertEqual(len(fi.keys()), 1)
        fname = list(fi)[0]
        # Clang seems to have the full path name for __FILE__
        # gcc seems to just use the path that is given on the command line, which won't include "framework"
        self.assertTrue(fname.endswith(os.path.join("src", "base", "Moose.C")), 'file "{}" found instead'.format(fname))
        self.assertGreater(fi[fname], 0)
        # the task registered by the action carries its own file info
        fi = adapt["tasks"]["set_adaptivity_options"]["file_info"]
        self.assertEqual(len(fi.keys()), 1)
        fname = list(fi)[0]
        self.assertTrue(fname.endswith(os.path.join("src", "actions", "SetAdaptivityOptionsAction.C")))
        self.assertGreater(fi[fname], 0)
class TestNoTemplate(unittest.TestCase):

    def test(self):
        """The JSON dump must not leak unexpanded template placeholders."""
        json_output = run_app(['--json'])
        for placeholder in ('<RESIDUAL>', '<JACOBIAN>'):
            self.assertNotIn(placeholder, json_output)
if __name__ == '__main__':
    # run all tests in this module with verbose reporting
    unittest.main(module=__name__, verbosity=2)
|
harterj/moose
|
test/tests/outputs/format/test_json.py
|
Python
|
lgpl-2.1
| 8,093
|
[
"MOOSE"
] |
b48882686fe68e8543cd8fca8d06253353052c63e5f50494f85b519630f014e4
|
from yambopy import *
from qepy import *
from schedulerpy import *
import os
import subprocess
from glob import glob
from copy import deepcopy
import matplotlib.pyplot as plt
class YamboDG_Optimize():
"""
Class to generate and run convergence tests for the yambo double grid.
** This class is useful but complex, read the description well AND/OR check its tutorial! **
- Needs a quantum espresso scf save folder
- Needs nscf (qe) and yambo [desired calculation type] inputs PASSED AS PwIn AND YamboIn OBJECTS
-- If converging the double grids, an independent-particles (ip) yambo input is required
- Needs lists of coarse grids (CG) and fine grids (FG); [NOTE] Only random FG presently implemented.
- Additional arguments: directory paths, file names, experimental laser energy [for absorption], etc.
- The workflow is divided into FOUR STEPS that can be executed separately:
1. nscf CG [STEPS='1']
2. nscf FG and yambo CG [STEPS='2']
3. yambo FG [STEPS='3']
4. if 'converge_DG' is on (therefore with yambo--> ip):
-- TODO: Analyis, report, plot results and give ip-converged value [STEPS='4']
-- TODO: Move the double grid generation functions to a different submodule
- Scheme of the workflow:
-- If job submissions are used, the workflow is better submitted in subsequent steps
-- If planning a parallel traversal (each independent branch simultaneously) of this tree
with job submissions, see function branch_wise_flow
NSCF YAMBO
| |
step 1 CG_1 CG_2 ... CG_N |
| | |
step 2 FG_11 ... FG_M1 FG_12 ... FG_M2 ... CG_1 CG_2 ... CG_N
| |
step 3 FG_11 ... FG_M1 FG_12 ... FG_M2 ...
\ | | /
\ \ | /
step 4 _________ PLOTS ______
Some optional variables
- E_laser: external laser energy (for RT checks)
- STEPS: which workflow steps to execute
- RUN: if False, no job is submitted
- converge_DG: if True, enables automatic double grid convergence; requires IP yambo input.
- nscf_out, y_out_dir: pw and yambo output directories
- qe_scheduler,y_scheduler: SchedulerPy object for cluster submission (default: bash)
- wait_all: if cluster submission is on, forces the master python process to wait for all sumbitted jobs to complete before exiting
- yambo_calc_type: name the yambo calculations
- yambo_exec: either yambo, yambo_ph or yambo_rt
- save_type: simple, elph, expanded_elph, fixsymm, fixsymm+elph
-- NOTE: if *elph is used: prepare a symlink to elph_dir in RUN_path
- noyambo: if RUN is True, run only QE steps plus SAVEs, but not actual yambo calculations
Example of use (frontend):
.. code-block:: python
YamboDG_Optimize(cg_grids,fg_grids,prefix,scf_path,pseudo_path,...,STEPS='all')
Example of use (job submission: each step dependent on the one before)
.. code-block:: python
scheduler1 = Scheduler(...)
scheduler2 = Scheduler(...,dependency=scheduler1)
scheduler3 = Scheduler(...,dependency=scheduler2)
scheduler1.add_command('python dg_script.py -steps 1')
sheduler1.run()
scheduler2.add_command('python dg_script.py -steps 2')
sheduler2.run()
scheduler3.add_command('python dg_script.py -steps 3')
sheduler3.run()
..
.. code-block:: dg_script.py
YamboDG_Optimize(cg_grids,fg_grids,prefix,scf_path,pseudo_path,...,wait_all==True,STEPS=args.steps)
TO DO:
- Separate double grid generation and double grid convergence (simple option 'converge_DG' might suffice)
- If automatic DG convergence assessment is on, then implement MOMENTA of the abs spectra as a method to check convergence
- Change check_nscf_completed into common/check_qe_completed
"""
def __init__(self,cg_grids,fg_grids,prefix,nscf_input,ya_input,E_laser=0.,STEPS='all',RUN=True, converge_DG=False,\
scf_save_path='./scf',pseudo_path='./pseudos',RUN_path='./',nscf_out="nscf",y_out_dir="results",\
qe_scheduler=None,y_scheduler=None,wait_all=True,pw_exec_path='',yambo_exec_path='',\
yambo_exec='yambo',save_type='simple',yambo_calc_type="yambo",noyambo=False):
#Configuring schedulers
self.frontend = Scheduler.factory(scheduler="bash")
if y_scheduler is not None and qe_scheduler is not None: #Here we use, e.g., slurm
self.qejobrun, self.yjobrun = qe_scheduler, y_scheduler
self.wait_up = True #This will enable calls to a function that will make the code wait upon completion of previous submitted job
self.job_folders = []
self.job_shells = []
elif y_scheduler is None and qe_scheduler is None: # Both schedulers must be present to activate job submission
self.qejobrun, self.yjobrun = Scheduler.factory(scheduler="bash"), Scheduler.factory(scheduler="bash")
self.wait_up = False
else: raise UserWarning('Submission mode is on only for either yambo or qe')
#Setting global variables
self.cg_grids = cg_grids
self.cg_strings = []
for cg in cg_grids: self.cg_strings.append("%dx%dx%d"%(cg[0],cg[1],cg[2]))
self.fg_grids = fg_grids
self.fg_strings = []
for fg in self.fg_grids:
temp_ls = []
for i in range(len(fg)): temp_ls.append(str(fg[i])+'_fg')
self.fg_strings.append(temp_ls)
self.prefix = prefix
self.scf_save_path = scf_save_path
self.pseudo_path = pseudo_path
self.RUN_path = RUN_path
self.yambo_calc_type = yambo_calc_type
self.noyambo = noyambo
self.E_laser = E_laser
# Initialize JOBID lists (used only in submission mode)
self.qe_id_cg = [ -1 for cg in self.cg_strings ]
self.ya_id_cg = [ -1 for cg in self.cg_strings ]
self.qe_id_fg = [ [ -1 for fg_i in fg ] for fg in self.fg_strings ]
self.ya_id_fg = [ [ -1 for fg_i in fg ] for fg in self.fg_strings ]
# Path of nscf and ip calculations and final plots
if converge_DG: self.yambo_calc_type='ip'
if not os.path.isdir(RUN_path): os.mkdir(RUN_path)
self.nscf_dir = '%s/nscf_grids'%RUN_path
self.yambo_dir = '%s/%s_grids'%(RUN_path,self.yambo_calc_type)
self.plot_dir = '%s/plots'%RUN_path
if not os.path.isdir(self.nscf_dir): os.mkdir(self.nscf_dir)
if not os.path.isdir(self.yambo_dir): os.mkdir(self.yambo_dir)
#Inputs
self.nscf_inp = nscf_input
self.ya_inp = ya_input
if converge_DG:
yambo_exec = 'yambo'
self.ip_input_is_there()
#Executables
if yambo_exec_path != '': yambo_exec_path_aux=yambo_exec_path+'/'
if pw_exec_path != '': pw_exec_path+='/'
self.pw = pw_exec_path + 'pw.x'
self.ypp = yambo_exec_path_aux + 'ypp'
self.yambo = yambo_exec_path_aux + yambo_exec
# Automatically determine which SAVE to create (better to specify it explicitly)
if yambo_exec == 'yambo': save_type='simple'
elif yambo_exec == 'yambo_ph' and not save_type[-4:]=='elph': save_type='elph'
elif yambo_exec == 'yambo_rt' and not save_type[3:]=='fix': save_type='fixsymm'
# Deal with elph_path
elph_path = None
if save_type[-4:]=='elph':
if ( not os.path.isdir('%s/elph_dir'%self.RUN_path) ) and ( not os.path.isfile('%s/elph_dir'%self.RUN_path) ):
raise FileNotFoundError('Please mv or symlink the elph_dir folder to the RUN_path of this workflow.')
else:
elph_path=self.RUN_path
#Start IO
self.yf = YamboIO(out_name='YAMBOPY_double-grid_Optimize.log',out_path=self.RUN_path,print_to_shell=True)
self.yf.IO_start()
if converge_DG: self.yf.msg('#### DOUBLE GRID CONVERGENCE WORKFLOW ####')
else: self.yf.msg('#### DOUBLE GRID WORKFLOW FOR %s CALCULATIONS ####'%self.yambo_calc_type)
self.driver(STEPS,RUN,nscf_out,y_out_dir,save_type,elph_path,yambo_exec_path,converge_DG)
#End IO
self.yf.IO_close()
# Check if python must exit immediately or after all submitted jobs have completed
if self.wait_up:
if wait_all: wait_for_all_jobs(self.job_shells,self.job_folders)
for shell in self.job_shells: shell.clean()
    def driver(self,STEPS,RUN,nscf_out,y_out_dir,save_type,elph_path,yambo_exec_path,converge_DG):
        """
        Worflow driver.
        It runs the following:
            - setup functions
            - job submission functions
            - double grid convergence functions

        :param STEPS: which steps to execute ('1','2','3','4' or 'all')
        :param RUN: if False, set up folders/inputs but submit no jobs
        """
        if STEPS=='1' or STEPS=='all':
            self.yf.msg("------ STEP 1 ------")
            self.setup_cg()
            if RUN: self.run_jobs(nscf_out,y_out_dir)
        if STEPS=='2' or STEPS=='all':
            self.yf.msg("------ STEP 2 ------")
            for ig,cg in enumerate(self.cg_strings):
                # NSCF status check
                calc_dir = '%s/%s_coarse_grid'%(self.nscf_dir,cg)
                calc = self.check_nscf_completed(calc_dir,nscf_out)
                if calc: self.yf.msg(" NSCF CG %s found."%cg)
                else: self.yf.msg(" NSCF CG %s NOT found."%cg)
                # YAMBO status check
                ycalc_dir = '%s/%s_coarse_grid'%(self.yambo_dir,cg)
                ycalc = self.yambo_output_is_NOT_there(ycalc_dir,y_out_dir)
                if ycalc: self.yf.msg(" YAMBO CG %s NOT found."%cg)
                else: self.yf.msg(" YAMBO CG %s found."%cg)
                #
                # only proceed when the nscf is done and yambo has not run yet
                if calc and ycalc:
                    yambo_dir = '%s/%s_coarse_grid'%(self.yambo_dir,cg)
                    CreateYamboSave(self.prefix,save_type=save_type,nscf=calc_dir,elph_path=elph_path,\
                                    database=os.path.abspath(yambo_dir),yambo_exec_path=yambo_exec_path,printIO=False)
                    self.setup_fg(calc_dir,yambo_dir,self.fg_grids[ig],self.fg_strings[ig])
            if RUN: self.run_jobs(nscf_out,y_out_dir)
        if STEPS=='3' or STEPS=='all':
            self.yf.msg("------ STEP 3 ------")
            for ig,cg in enumerate(self.cg_strings):
                for iff,fg in enumerate(self.fg_strings[ig]):
                    # NSCF status check
                    calc_dir = '%s/%s_coarse_grid/%s'%(self.nscf_dir,cg,fg)
                    calc = self.check_nscf_completed(calc_dir,nscf_out)
                    if calc: self.yf.msg(" NSCF CG %s FG %s found."%(cg,fg))
                    else: self.yf.msg(" NSCF CG %s FG %s NOT found."%(cg,fg))
                    # YAMBO status check
                    ycalc_dir = '%s/%s_coarse_grid/%s'%(self.yambo_dir,cg,fg)
                    ycalc = self.yambo_output_is_NOT_there(ycalc_dir,y_out_dir)
                    if ycalc: self.yf.msg(" YAMBO CG %s FG %s NOT found."%(cg,fg))
                    else: self.yf.msg(" YAMBO CG %s FG %s found."%(cg,fg))
                    #
                    if calc and ycalc:
                        yambo_dir = '%s/%s_coarse_grid/%s'%(self.yambo_dir,cg,fg)
                        # build the fine-grid SAVE only once
                        if not os.path.isfile('%s/SAVE/ndb.Double_Grid'%yambo_dir):
                            CreateYamboSave(self.prefix,save_type='simple',nscf=calc_dir,elph_path=None,\
                                            database="%s/dg_SAVE"%os.path.abspath(yambo_dir),yambo_exec_path=yambo_exec_path,printIO=False)
                            self.setup_yambo_fg(yambo_dir,self.fg_grids[ig][iff],y_out_dir)
            if RUN: self.run_jobs(nscf_out,y_out_dir)
        # This is a plotting routine in the ip case. It has to be updated to a full convergence analysis and report.
        # The involved functions can possibly be moved into another file
        #
        if (STEPS=='4' or STEPS=='all') and converge_DG :
            self.yf.msg("---------- STEP 4 ----------")
            self.yf.msg("-- Double grid convergence --")
            yout = 'o-%s.eps_q1_ip'%y_out_dir
            ip_data = []
            ip_labels = []
            #
            # collect every available IP spectrum (CG and FG) with its label
            for ig,cg in enumerate(self.cg_strings):
                ip_data_temp = []
                ip_labels_temp = []
                temp_path = '%s/%s_coarse_grid/%s/%s'%(self.yambo_dir,cg,y_out_dir,yout)
                if os.path.isfile(temp_path):
                    self.yf.msg(" IP CG %s found."%cg)
                    ip_data_temp.append(np.loadtxt(temp_path))
                    ip_labels_temp.append(cg)
                else:
                    self.yf.msg(" IP CG %s NOT found."%cg)
                for iff,fg in enumerate(self.fg_strings[ig]):
                    temp_path = '%s/%s_coarse_grid/%s/%s/%s'%(self.yambo_dir,cg,fg,y_out_dir,yout)
                    if os.path.isfile(temp_path):
                        self.yf.msg(" IP CG %s FG %s found."%(cg,fg))
                        ip_data_temp.append(np.loadtxt(temp_path))
                        ip_labels_temp.append('%s N_FG=%d'%(cg,self.fg_grids[ig][iff]))
                    else:
                        self.yf.msg(" IP CG %s FG %s NOT found."%(cg,fg))
                ip_data.append(ip_data_temp)
                ip_labels.append(ip_labels_temp)
            #
            # drop coarse grids with no spectra at all before plotting
            ip_data = [ip for ip in ip_data if ip != []]
            ip_labels = [ip for ip in ip_labels if ip != []]
            self.plot_ip_spectra(ip_data,ip_labels,self.E_laser)
    def setup_cg(self):
        """ First step of the workflow: setup CG folder tree and CG nscf calculations
        """
        for ig,cg in enumerate(self.cg_grids):
            # nscf side: one folder per coarse grid, seeded with the scf save
            work_dir = "%s/%s_coarse_grid"%(self.nscf_dir,self.cg_strings[ig])
            if not os.path.isdir(work_dir):
                os.mkdir(work_dir)
                self.generate_inputfile(work_dir,cg)
                os.system('cp -r %s/%s.save %s'%(self.scf_save_path,self.prefix,work_dir))
            # yambo side: matching folder holding the yambo input file
            yambo_dir = "%s/%s_coarse_grid"%(self.yambo_dir,self.cg_strings[ig])
            if not os.path.isdir(yambo_dir):
                os.mkdir(yambo_dir)
                self.ya_inp.write('%s/%s.in'%(yambo_dir,self.yambo_calc_type))
    def setup_fg(self,nscf_cg_dir,yambo_cg_dir,fg_grid,fg_string):
        """ Second step of the workflow: setup FG folder tree and FG (CG) nscf (yambo) calculations
        """
        for iff,fg in enumerate(fg_grid):
            fg_nscf_inp = '%s_fg.nscf'%self.prefix
            rand_nm = 'random_kpt_list_%s.dat'%fg_string[iff]
            ypp_inp = 'ypp_fg.in'
            # nscf side: folder seeded with the scf save
            work_dir = "%s/%s"%(nscf_cg_dir,fg_string[iff])
            if not os.path.isdir(work_dir):
                os.mkdir(work_dir)
                os.system('cp -r %s/%s.save %s'%(self.scf_save_path,self.prefix,work_dir))
            # yambo side: folder with input file and a dg_SAVE placeholder
            yambo_dir = "%s/%s"%(yambo_cg_dir,fg_string[iff])
            if not os.path.isdir(yambo_dir):
                os.mkdir(yambo_dir)
                os.mkdir("%s/dg_SAVE"%yambo_dir)
                self.ya_inp.write('%s/%s.in'%(yambo_dir,self.yambo_calc_type))
            if not os.path.isfile('%s/%s'%(work_dir,fg_nscf_inp)):
                # generate the random fine-grid kpoints with ypp, then feed
                # them to a new nscf input in crystal coordinates
                self.generate_ypp_input_random_grid(yambo_cg_dir,fg,ypp_inp)
                ypp_run = self.frontend
                ypp_run.add_command('cd %s; %s -F %s > ypp_fg.log'%(yambo_cg_dir,self.ypp,ypp_inp))
                ypp_run.add_command('mv o.random_k_pts %s'%rand_nm)
                ypp_run.add_command('cp %s %s'%(rand_nm,work_dir))
                ypp_run.run()
                kpts_rndm = np.loadtxt('%s/%s'%(work_dir,rand_nm))
                # ypp may emit a slightly different number of random kpoints
                if len(kpts_rndm) != fg:
                    self.yf.msg("[WARNING] Actual fine grid number of kpts is %d instead of %d"%(len(kpts_rndm),fg))
                self.generate_inputfile(work_dir,fg,klist=kpts_rndm)
    def setup_yambo_fg(self,yambo_fg_dir,fg_num,yresults_dir,clean_dg_saves=True):
        """ Third step of the workflow: map FG to CG and FG yambo calculations
        """
        ypp_inp = 'ypp_map.in'
        os_run = self.frontend
        # Reuse already-computed dipoles from the CG run when available
        if os.path.isfile('%s/../%s/ndb.dipoles'%(yambo_fg_dir,yresults_dir)):
            os_run.add_command('cd %s; cp ../%s/ndb.dipoles* ../SAVE/ ; cp -r ../SAVE .'%(yambo_fg_dir,yresults_dir))
        else:
            os_run.add_command('cd %s; cp -r ../SAVE .'%yambo_fg_dir)
        os_run.run()
        # Map the fine grid onto the coarse grid with ypp
        self.generate_ypp_input_map_grid(yambo_fg_dir,fg_num,ypp_inp)
        ypp_run = self.frontend
        ypp_run.add_command('cd %s; %s -F %s > ypp_map.log'%(yambo_fg_dir,self.ypp,ypp_inp))
        ypp_run.run()
        if os.path.isfile('%s/SAVE/ndb.Double_Grid'%yambo_fg_dir):
            self.yf.msg(" -> Double Grid ready. <-")
            if clean_dg_saves:
                # the dg_SAVE content is no longer needed once mapping succeeded
                os.system('rm -r %s/dg_SAVE/SAVE'%yambo_fg_dir)
                os.system('touch %s/dg_SAVE/DOUBLE_GRID_SAVE_REMOVED_TO_SAVE_DISK_SPACE'%yambo_fg_dir)
        else:
            self.yf.msg(" -> Double Grid NOT ready. <-")
    def generate_inputfile(self,folder,kpts,klist=None):
        """ Modify nscf input file in case of CG or FG kpoint grids.

        :param folder: directory where '<prefix>.nscf' is written
        :param kpts: automatic kpoint grid (used when klist is None)
        :param klist: explicit kpoint list in crystal coordinates (FG case)
        """
        import copy
        # deep-copy so the template nscf input is never modified in place
        qe_input = copy.deepcopy(self.nscf_inp)
        qe_input.control['pseudo_dir'] = "'%s'"%self.pseudo_path
        if klist is None:
            qe_input.kpoints = kpts
        else:
            qe_input.ktype = "crystal"
            qe_input.klist = klist
        qe_input.write('%s/%s.nscf'%(folder,self.prefix))
    def generate_ypp_input_random_grid(self,folder,fg_num,inp_nm):
        """ Create ypp input file for the generation of the FG coordinates [units: yambo "rlu" -> qe "crystal"]

        :param fg_num: requested number of random fine-grid kpoints
        """
        yppin = YamboIn()
        yppin.arguments.append('bzgrids')
        yppin.arguments.append('Random_Grid')
        # alat taken from the qe input so the units match
        yppin['OutputAlat'] = self.nscf_inp.system['celldm(1)']
        yppin['cooOut'] = "rlu"
        yppin['BZ_random_Nk'] = fg_num
        yppin.write('%s/%s'%(folder,inp_nm))
    def generate_ypp_input_map_grid(self,folder,fg_num,inp_nm):
        """ Create ypp input file for the mapping of the FG grid to the CG grid

        The fine-grid SAVE is expected under './dg_SAVE' relative to *folder*.
        """
        yppin = YamboIn()
        yppin.arguments.append('kpts_map')
        yppin['FineGd_mode']='unexpanded'
        yppin['BZ_DbGd_Nk']=fg_num
        yppin.arguments.append('SkipCheck')
        yppin['FineGd_DB1_paths'] = ['./dg_SAVE']
        yppin.write('%s/%s'%(folder,inp_nm))
    def run_jobs(self,out_qe,out_yambo):
        """ Workflow job submission: run every nscf/yambo calculation that is
        set up but not yet completed.
        """
        #MODULARIZATION ISSUE:
        # remember that function check_nscf_completed has a dependency
        # on the name of the qe output file - it has to be '*.out' - and
        # hence it depends on how this name is given in function shell_run
        for ig,cg in enumerate(self.cg_strings): # ---------- Outer COARSE GRID loop ----------
            # temp_dir: where qe calculations are run
            # save_dir: where yambo calculations are run
            temp_dir = '%s/%s_coarse_grid'%(self.nscf_dir,cg)
            if not self.check_nscf_completed(temp_dir,out_qe):
                self.yf.msg("Running NSCF CG %s..."%cg) ############## Run NSCF CG
                self.qe_id_cg[ig] = self.shell_run("qe_%s"%cg,temp_dir,out_qe,'qe')
                if self.wait_up: self.job_folders.append(temp_dir)
            save_dir = '%s/%s_coarse_grid'%(self.yambo_dir,cg)
            # a SAVE folder means the yambo run is ready to go
            if os.path.isdir('%s/SAVE'%save_dir):
                if self.yambo_output_is_NOT_there(save_dir,out_yambo) and not self.noyambo:
                    self.yf.msg("Running YAMBO CG %s..."%cg) ############ Run YAMBO CG
                    self.ya_id_cg[ig] = self.shell_run("ya_%s"%cg,save_dir,out_yambo,'y') # depends on JOBID='%d'%self.qe_id_cg[ig])
                    if self.wait_up: self.job_folders.append(save_dir)
            try: self.fg_strings[ig]
            except IndexError as err: raise Exception('No fine grid(s) provided for CG %s.'%cg) from err
            for iff,fg in enumerate(self.fg_strings[ig]): # ---------- Inner FINE GRID loop ----------
                temp_dir = '%s/%s_coarse_grid/%s'%(self.nscf_dir,cg,fg)
                if os.path.isdir(temp_dir):
                    if not self.check_nscf_completed(temp_dir,out_qe):
                        self.yf.msg("Running NSCF CG %s FG %s..."%(cg,fg)) ######### Run NSCF FG
                        self.qe_id_fg[ig][iff] = self.shell_run("qe_%s"%cg,temp_dir,out_qe,'qe') # depends on JOBID='%d'%self.qe_id_cg[ig])
                        if self.wait_up: self.job_folders.append(temp_dir)
                save_dir = '%s/%s_coarse_grid/%s'%(self.yambo_dir,cg,fg)
                # the FG yambo run additionally needs the mapped double grid
                if os.path.isdir('%s/SAVE'%save_dir) and os.path.isfile('%s/SAVE/ndb.Double_Grid'%save_dir):
                    if self.yambo_output_is_NOT_there(save_dir,out_yambo) and not self.noyambo:
                        self.yf.msg("Running YAMBO CG %s FG %s..."%(cg,fg)) ############ Run YAMBO FG
                        self.ya_id_fg[ig][iff] = self.shell_run("ya_%s"%cg,save_dir,out_yambo,'y') # depends on JOBID='%d:%d'%(self.ya_id_cg[ig],self.qe_id_fg[ig][iff]))
                        if self.wait_up: self.job_folders.append(save_dir)
    def shell_run(self,jname,run_dir,out_dir,exec,JOBID=None):
        """
        Submit job
        exec:
            qe: runs pw.x
            y: runs the yambo* executable of choice
        jname: job name
        JOBID: job id of simulation that the present job has a dependency on
        run_dir: where job is run
        out_dir: name of output (folder and log for yambo; '%s.out'%out_dir file for qe)
        returns id of present submitted job
        """
        # NOTE(review): the parameter name 'exec' shadows the builtin; it is
        # kept unchanged for interface stability.
        # copy the scheduler template so per-job settings do not accumulate
        if exec=='qe': shell = deepcopy(self.qejobrun)
        if exec=='y': shell = deepcopy(self.yjobrun)
        shell.name = '%s_%s'%(jname,shell.name)
        # Add dependency if specified
        if self.wait_up and JOBID is not None:
            dependency='afterok:%s'%JOBID
            shell.dependency=dependency
        if exec=='qe': shell.add_mpirun_command('%s -inp %s.nscf > %s.out'%(self.pw,self.prefix,out_dir))
        if exec=='y': shell.add_mpirun_command('%s -F %s.in -J %s -C %s 2> %s.log'%(self.yambo,self.yambo_calc_type,out_dir,out_dir,out_dir))
        shell.run(filename='%s/%s.sh'%(run_dir,exec)) ### Specify run path
        # Manage submissions if specified
        #if self.wait_up: wait_for_job(shell,run_dir)
        if self.wait_up: this_job_id = shell.jobid
        else: this_job_id = -1
        # keep the shell object alive in submission mode (cleaned at the end)
        if self.wait_up: self.job_shells.append(shell)
        else: shell.clean()
        return this_job_id
def ip_input_is_there(self):
""" Check if yambo ip input is correctly given in converge_DG case
"""
condition = 'chi' in self.ya_inp.arguments and \
'optics' in self.ya_inp.arguments and \
self.ya_inp['Chimod']=='IP'
if not condition:
raise FileNotFoundError("IP input file not recognised: are you sure you specified 'chi' and 'optics' runlevels and Chimod='IP'?")
def check_nscf_completed(self,folder,out_prefix):
""" Check if nscf calculation has completed and proceed
"""
status = True
try:
check = subprocess.check_output("grep JOB %s/%s*"%(folder,out_prefix), shell=True, stderr=subprocess.STDOUT)
check = check.decode('utf-8')
check = check.strip().split()[-1]
except subprocess.CalledProcessError as e:
check = ""
if check != "DONE.": status = False
return status
def yambo_output_is_NOT_there(self,calc_dir,results_dir):
""" Check if yambo produced non-empty outputs
"""
condition = 0
out_files = glob('%s/%s/o-%s.*'%(calc_dir,results_dir,results_dir))
for out_file in out_files:
test = ( not os.path.isfile(out_file) ) or ( os.path.isfile(out_file) and os.stat(out_file).st_size == 0 )
if test: condition+=1
if condition==len(out_files): return True # This means no output file has been produced: this calculation must be run
elif condition==0: return False # This means all output files have been produced: this calculation must be skipped
else: raise UserWarning('Some output files have been produced and some not -- check the above calculation.')
#TODO: move convergence-related functions to a new submodule
    def plot_ip_spectra(self,data,labels,w_laser,fig_name='ip_spectra',xlims=None):
        """ Plot results in a dynamic layout (chosen by FP)

        :param data: list (one entry per CG) of lists of loadtxt arrays
        :param labels: matching list of legend labels
        :param w_laser: laser energy drawn as a vertical reference line
        :param fig_name: output png name (written under self.plot_dir)
        """
        if not os.path.isdir(self.plot_dir): os.mkdir(self.plot_dir)
        lwidth = 0.8
        # one panel per coarse grid, shared y axis
        f, (axes) = plt.subplots(1,len(data),sharey=True,figsize=(12,4))
        for ig,cg in enumerate(data):
            colors = plt.cm.gist_rainbow(np.linspace(0.,1.,num=len(cg)))
            axes[ig].set_ylim(0.,1.1*np.max(cg[0][:,1]))
            if xlims is not None: axes[ig].set_xlim(xlims[0],xlims[1])
            for iff,fg in enumerate(cg):
                axes[ig].plot(fg[:,0],fg[:,1], '-', lw = lwidth, color = colors[iff], label = labels[ig][iff])
            # shade the last (densest fine grid) curve of each panel
            axes[ig].fill_between(cg[0][:,0], cg[-1][:,1], color = colors[-1], alpha = 0.2)
        for ax in axes:
            ax.axvline(w_laser,lw=0.8,color='black',zorder=-1)
            ax.legend()
        f.tight_layout()
        plt.savefig('%s/%s.png'%(self.plot_dir,fig_name),format='png',dpi=150)
def clean_everything(self):
""" Remove workflow tree
"""
rm_run = self.frontend
rm_run.add_command('rm -rf %s %s %s'%(self.nscf_dir,self.yambo_dir,self.plot_dir))
rm_run.run()
self.yf.msg("Workflow removed.")
    def branch_wise_flow(self):
        """
        Document the job-dependency structure of the workflow (no code).

        Two dependency graphs matter:

        1. Between job submissions: every fine-grid nscf (qe_FG_i) and the
           coarse-grid yambo run (y_CG) depend on their coarse-grid nscf
           (qe_CG); every fine-grid yambo run (y_FG_i) depends on both its
           qe_FG_i and y_CG.  The same pattern repeats in parallel for each
           coarse grid.  With converge_DG on, a final reduction/plot step
           depends on all jobs.

        2. Between workflow functions (single branch):
           qe_CG -> SAVE -> setup_fg -> (qe_FG_i ..., y_CG)
                 -> setup_yambo_fg -> y_FG_i
           with synchronization barriers between consecutive stages.

        Because of those barriers the separate branches cannot run freely:
        it is better to submit the workflow 'STEP by STEP' (see the class
        docstring) rather than 'branch by branch', although a branch-wise
        traversal using scheduler dependencies would be more efficient if
        implemented.
        """
|
alexmoratalla/yambopy
|
yambopy/double_grid/dg_convergence.py
|
Python
|
bsd-3-clause
| 29,787
|
[
"CRYSTAL",
"Quantum ESPRESSO",
"Yambo"
] |
1140029a67465f8a7a21374fe0eafb3922e9d6dbd46440d9cab9de4eb249d965
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# --- simulation geometry and damping-band configuration ---
dimensions = 1
halfSize1d = 55
GRIDSIZE = [2**12]
halfSize = [halfSize1d,0.1,0.1]# must be three components, because yade is inherently 3D and uses Vector3r. Remaining components will be used for AABB
dampMarginBandMin = 2
dampMarginBandMax = 20
dampFormulaSmooth = True # True - uses exp() with smoothed edge, False - uses 'traditional' exp() cut-off with discontinuity in first derivative.
dampExponent = 0.01
#dampFormulaSmooth = False # True - uses exp() with smoothed edge, False - uses 'traditional' exp() cut-off with discontinuity in first derivative.
#dampExponent = 4
# --- initial gaussian wave-packet parameters ---
zero_shift_left = -45
k0_x = 8
gaussWidth = 0.95 #/ 2.0
x0_center = 6.15 + zero_shift_left
mass = 1604.391494
# keyword arguments shared by both (analytic and numeric) packet states below
gaussPacketArg = {'x0':[x0_center,0,0],'t0':0,'k0':[k0_x,0,0],'a0':[gaussWidth,0,0],'gridSize':GRIDSIZE}
## This is a simple test:
## - a freely moving particle according to Schrodinger equation is calculated using Tal-Ezer Kosloff 1984 method
## - it is compared with the same movement, but calculated analytically
## The error between numerical and analytical solution is plot on the graph
O.engines=[
    StateDispatcher([
        St1_QMPacketGaussianWave(),
    ]),
    SpatialQuickSortCollider([
        Bo1_Box_Aabb(),
    ]),
    SchrodingerKosloffPropagator(
        FIXMEatomowe_MASS = mass
        ,dampMarginBandMin = dampMarginBandMin
        ,dampMarginBandMax = dampMarginBandMax
        ,dampFormulaSmooth = dampFormulaSmooth
        ,dampExponent = dampExponent
        ,dampDebugPrint = False
        ,threadNum=1),
    SchrodingerAnalyticPropagator()
    # record the numeric-vs-analytic error once per iteration
    ,PyRunner(iterPeriod=1,command='myAddPlotData()')
]
# --- display configuration ---
dampDrawScale = 30
displayOptionsDamp= { 'partAbsolute':['default wire', 'hidden', 'nodes', 'points', 'wire', 'surface']
    ,'partImaginary':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
    ,'partReal':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
    ,'stepRender':["default stripes","hidden","frame","stripes","mesh"]}
partsScale = 30
stepRenderHide =["default hidden","hidden","frame","stripes","mesh"]
## Two particles are created - the analytical one, and the numerical one. They
## do not interact, they are two separate calculations in fact.
## The analytical one:
analyticBody = QMBody()
# make sure it will not interact with the other particle (although interaction is not possible/implemented anyway)
analyticBody.groupMask = 2
analyticBody.shape = QMGeometry(extents=halfSize,color=[0.8,0.8,0.8],displayOptions=[
    QMDisplayOptions(partsScale=partsScale,stepRender=stepRenderHide,renderWireLight=False)
    ,QMDisplayOptions(partsScale=partsScale,stepRender=stepRenderHide,renderWireLight=False,renderFFT=True,renderSe3=(Vector3(0,0,60), Quaternion((1,0,0),0)))
])
# it's too simple now. Later we will have quarks (up, down, etc.), leptons and bosons as a material.
# So no material for now.
analyticBody.material = QMParticle(dim=dimensions,hbar=1,m=mass)
analyticBody.state = QMPacketGaussianWave(**gaussPacketArg)
nid=O.bodies.append(analyticBody)
O.bodies[nid].state.setAnalytic() # is propagated as analytical solution - no calculations involved
## The numerical one:
numericalBody = QMBody()
# make sure it will not interact with the other particle (although interaction is not possible/implemented anyway)
numericalBody.groupMask = 1
numericalBody.shape = QMGeometry(extents=halfSize,color=[1,1,1],displayOptions=[
    QMDisplayOptions(partsScale=partsScale,stepRender=stepRenderHide,renderWireLight=False)
    ,QMDisplayOptions(partsScale=partsScale,stepRender=stepRenderHide,renderWireLight=False,renderFFT=True,renderSe3=(Vector3(0,0,60), Quaternion((1,0,0),0)))
])
numericalBody.material = analyticBody.material
# Initialize the discrete wavefunction using the analytical gaussPacket created earlier.
# The wavefunction shape can be anything - as long as it is normalized, in this case the Gauss shape is used.
# The grid size must be a power of 2 to allow FFT. Here 2**12=4096 is used.
numericalBody.state = QMPacketGaussianWave(**gaussPacketArg)
nid=O.bodies.append(numericalBody)
O.bodies[nid].state.setNumeric() # is being propagated by SchrodingerKosloffPropagator
## Define timestep for the calculations
#O.dt=.000001
O.dt=150
## Save the scene to file, so that it can be loaded later. Supported extension are: .xml, .xml.gz, .xml.bz2.
O.save('/tmp/a.xml.bz2');
#o.run(100000); o.wait(); print o.iter/o.realtime,'iterations/sec'
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
# NOTE(review): ('error') is just the string 'error', not a 1-tuple -- confirm
# yade's plot module accepts a bare string here.
plot.plots={'t':('error')}
def myAddPlotData():
    """Record the current (analytic - numeric) wavefunction error for plotting."""
    analyticState = O.bodies[0].state
    numericState = O.bodies[1].state
    # refresh the analytic solution before taking the difference
    analyticState.update()
    diff = analyticState - numericState
    # zero out the damped margin bands so they do not pollute the error norm
    keepInsideRange = False
    verbose = False
    diff.zeroRange([0], [dampMarginBandMin + 2], keepInsideRange, verbose)
    diff.zeroRange([2 * halfSize1d - dampMarginBandMax - 2], [2 * halfSize1d], False, verbose)
    # the squared norm <diff|diff> of the difference is the plotted error
    plot.addData(t=O.time, error=(diff | diff).real)
plot.liveInterval=.2
plot.plot(subPlots=False)
# Open the GUI viewer when the qt module is available; headless runs skip this.
try:
    from yade import qt
    qt.View()
    qt.Controller()
    qt.controller.setWindowTitle("1D free prop. of gaussian packet")
    qt.controller.setViewAxes(dir=(0,1,0),up=(0,0,1))
    qt.Renderer().blinkHighlight=False
    qt.Renderer().extraDrawers=[GlExtra_QMEngine(drawDTable=True,dampColor=Vector3(1,1,1)
        ,dampDisplayOptions=QMDisplayOptions(partsScale=dampDrawScale
        ,renderSe3=(Vector3(0,0,0), Quaternion((1,0,0),0)),**displayOptionsDamp))]
    qt.views()[0].center(False,5) # median=False, suggestedRadius = 5
except ImportError:
    pass
#O.run(20000)
#### save result for comparison with mathematica
#
#ff=open("1d-free-propagation-yade.txt",'w')
#for i in range(nn.gridSize[0]):
#    ff.write(str(nn.iToX(i,0))+" "+str((nn.atPsiGlobal([i])).real)+" "+str((nn.atPsiGlobal([i])).imag)+"\n")
#ff.close()
|
cosurgi/trunk
|
examples/qm/1d-free-propagation-DAMP-TEST.py
|
Python
|
gpl-2.0
| 6,342
|
[
"Gaussian"
] |
8c868212ea19a873ef9788886f4f8a19ddbd19ac9056e3ba54ec2d1461a31556
|
"""
.. module:: compute_month_anomaly.py
:synopsis: This script should calculate the anomaly for every month and
store it as nc files in the appropriate directories.
.. moduleauthor:: Arulalan.T <arulalant@gmail.com>
"""
import os
import sys
import cdms2
# setting the absolute path of the previous directory
# getting the this py module path by __file__ variable
# pass that __file__ to the os.path.dirname, returns the path of this module
__diagnosisDir__ = os.path.dirname(__file__)
previousDir = os.path.abspath(os.path.join(__diagnosisDir__, '..'))
# adding the previous path to python path
sys.path.append(previousDir)
# import xml_data_acces.py from previous directory diagnosisutils
import diagnosisutils.xml_data_access as xml_data_access
from diagnosisutils.timeutils import TimeUtility
from diag_setup.varsdict import variables
from diag_setup.globalconfig import models, climatologies, processfilesPath
from diag_setup.gendir import createDirsIfNotExists
import diag_setup.netcdf_settings
# create time utility object (time-axis helpers used throughout this module)
timobj = TimeUtility()
# timeAxis check value to skip the anomaly process for the existing month in
# the mean nc file
__timeCheck__ = True
def genMonthAnomalyDirs(modelname, modelpath, climregridpath, climpfilename,
                        climatologyyear):
    """
    :func:`genMonthAnomalyDirs` : It should generate the directory structure
            whenever it needs. It reads the timeAxis information of the
            model data xml file(which is updating it by cdscan), and once
            the full months is completed, then it should check either that
            month directory is empty or not.
            case 1: If that directory is empty means, it should call the
                    function called `genMonthAnomalyFiles`, to calculate
                    the mean analysis and anomaly for that month and should
                    store the processed files in side that directory.
            case 2: If that directory is non empty means,
                    ****have to update*****

    Inputs : modelname is the model data name, which will become part of the
             directory structure.
             modelpath is the absolute path of data where the model xml files
             are located.
             climregridpath is the absolute path of the climatolgy regridded
             path w.r.t to this model data resolution (both horizontal and
             vertical)
             climpfilename is the climatolgy Partial File Name to combine the
             this passed name with (at the end) of the climatolgy var name to
             open the climatolgy files.
             climatolgyyear is the year of climatolgy data.

    Outputs : It should create the directory structure in the processfilesPath
              and create the processed nc files.

    Written By : Arulalan.T

    Date : 01.12.2011

    """
    xmlobj = xml_data_access.GribXmlAccess(modelpath)
    # get one model var name from the global 'vars.txt' file
    mvar = variables.get(modelname).values()[0].model_var
    # open the analysis ('a') dataset for that variable
    modeldataset = xmlobj[mvar, 'a']
    # get the timeAxis of modeldata set and correct its bounds
    modeltime = timobj._correctTimeAxis(modeldataset.getTime())
    # get the fully available months
    availableMonths = timobj.getTimeAxisFullMonths(modeltime)
    # create modelname, Anomaly directories if it is not exists
    childMeanPath = createDirsIfNotExists(processfilesPath,
                                          [modelname, 'Anomaly'])
    for year in availableMonths:
        # get the months dictionary
        monthdic = availableMonths.get(year)
        # sort the months in correct order
        months = timobj._sortMonths(monthdic.keys())
        year = str(year)
        # create Mean Root Year,Month directories if it is not exists
        meanAnomalyPath = createDirsIfNotExists(childMeanPath,
                                                [year, 'Month', 'Analysis'])
        # generate mean analysis month path
        meanAnalysisPath = os.path.join(processfilesPath, modelname, 'Mean',
                                        year, 'Month', 'Analysis')
        for month in months:
            # get the start & end date of the month
            sedate = monthdic.get(month)
            month = month.lower()
            # combaine month and its startdate & enddate within tuple
            monthdate = (month, sedate)
            # calling function to create all nc files mean monthly anomaly
            genMonthAnomalyFiles(meanAnomalyPath, meanAnalysisPath,
                                 climregridpath, climpfilename, climatologyyear,
                                 monthdate, year, modelName = modelname,
                                 modelXmlObj = xmlobj)
        # end of for month in months:
        # close all the opened xml file objects
        # NOTE(review): placed inside the year loop per the original comment
        # ordering -- confirm GribXmlAccess reopens its objects lazily, else
        # this belongs after the year loop.
        xmlobj.closeXmlObjs()
    # end of for year in availableMonths.keys():
# end of def genMonthAnomalyDirs()
def genMonthAnomalyFiles(meanAnomalyPath, meanAnalysisPath, climRegridPath,
climPFileName, climatologyYear,
monthdate, year, **model):
"""
:func:`genMonthAnomalyFiles` : It should calculate monthly mean anomaly
from the monthly mean analysis and monthly mean climatolgy,
for the month (of year) and process it. Finally
stores it as nc files in corresponding directory path which are
passed in this function args.
Inputs : meanAnomalyPath is the absolute path where the processed mean
anomaly nc files are going to store.
meanAnalysisPath is the absolute path where the processed mean
analysis nc files were already stored.
climRegridPath is the absolute path where the regridded monthly
mean climatologies (w.r.t the model vertical resolution)
nc files were already stored.
climPFileName is the partial nc filename of the climatolgy.
climatologyYear is the year of the climatolgy to access it.
monthdate (which contains monthname, startdate & enddate) and
year are the inputs to extract the monthly data.
KWargs: modelName, modelXmlPath, modelXmlObj
modelName is the model data name which will become part of the
process nc files name.
modelPath is the absolute path of data where the model xml files
are located.
modelXmlObj is an instance of the GribXmlAccess class instance.
If we are passing modelXmlObj means, it will be optimized one
when we calls this same function for same model for different
months.
We can pass either modelXmlPath or modelXmlObj KWarg is enough.
Outputs : It should create mean anomaly for the particular variables which
are all set the clim_var option in the vars.txt file. Finally
store it as nc file formate in the proper directories structure
(modelname, process name, year and then month hierarchy).
Written By : Arulalan.T
Date : 08.09.2011
Updated : 07.12.2011
"""
modelXmlObj, modelPath = None, None
if 'modelName' in model:
modelName = model.get('modelName')
else:
raise RuntimeError("KWarg modelName must be passed")
if 'modelXmlObj' in model:
modelXmlObj = model.get('modelXmlObj')
elif 'modelXmlPath' in model:
modelPath = model.get('modelXmlPath')
else:
raise RuntimeError("you must pass atleast one KWargs of modelXmlPath \
or modelXmlPath ")
if not modelXmlObj:
xmlobj = xml_data_access.GribXmlAccess(modelPath)
else:
if isinstance(modelXmlObj, xml_data_access.GribXmlAccess):
xmlobj = modelXmlObj
else:
raise ValueError("Passed modelXmlObj instance %s is not an \
instance of GribXmlAccess class " % type(modelXmlObj))
# end of if not modelXmlObj:
month = monthdate[0]
# get the startdate, enddate of this month for both model & climatolgy
modelDataTimeRange = monthdate[1]
if not os.path.isdir(meanAnalysisPath):
raise RuntimeError("The monthly mean analysis directory doesnt \
exists in the path %s " % (meanAnalysisPath))
# get the nc files name of mean analysis
anlfiles = [f for f in os.listdir(meanAnalysisPath) if f.endswith('.nc')]
if not anlfiles:
raise RuntimeError("monthly mean analysis directory is empty. \
So couldnt compute anomaly. Stopping the process \
for %s month" % (month))
# get the nc files name of mean anomaly
ncfiles = [f for f in os.listdir(meanAnomalyPath) if f.endswith('.nc')]
# make ncfiles as dictionary with key as var name
ncfiledic = {}
for ncfile in ncfiles:
var = ncfile.split('_')[0]
ncfiledic[var] = ncfile
# make memory free
del ncfiles
anlvariables = xmlobj.listvariable(Type = 'a')
# get the namedtuple object from the global 'vars.txt' file
totalvars = variables.get(modelName)
for globalvar in totalvars.itervalues():
# get the model var name
mvar = globalvar.model_var
# get the climatolgy var name
cvar = globalvar.clim_var
if not cvar:
print "Climatology var name is empty string. So skipping anomaly \
process for %s model var name " % mvar
continue
# end of if not cvar:
if not mvar in anlvariables:
print "The variable %s is not available in the xml anl file object" % mvar
print "So skipping the anomaly and mean analysis processes \
for this variable %s which is one of the keys of the \
variables dictionary" % mvar
continue
# end of if not mvar in allvariables:
# partial nc file name
pFileName = mvar + '_'+ modelName + '_' + year
# store anomaly into proper nc file
if mvar in ncfiledic:
anomalyFileName = ncfiledic.get(mvar)
meanAnomalyFilePath = meanAnomalyPath + '/' + anomalyFileName
try:
# open nc file in append mode
anomalyFile = cdms2.open(meanAnomalyFilePath, 'a')
# get the ncfile timeAxis
fileTime = anomalyFile[mvar].getTime()
# Do check either this month timeAxis is already exists in
# the nc file's timeAxis or not. If exists means skip it.
if __timeCheck__:
if modelDataTimeRange[0] in fileTime.asComponentTime():
print "The mean anomaly is already exists in the \
file %s. So skipping var '%s' " % \
(anomalyFileName, mvar)
anomalyFile.close()
continue
# end of if __timeCheck__:
except cdms2.error.CDMSError, AttributeError:
# if it getting this error means, it may not written
# properly. So open nc file in write mode freshly.
print "Got Problem. nc file is correpted at last time. \
May be lost the previous months data.\
Now creating same nc file '%s' freshly w.r.t current \
month %s" % (anomalyFileName, month)
anomalyFile = cdms2.open(meanAnomalyFilePath, 'w')
else:
# generate the nc filename
anomalyFileName = pFileName + '_mean_anomaly.nc'
meanAnomalyFilePath = meanAnomalyPath + '/' + anomalyFileName
# open new nc file in write mode
anomalyFile = cdms2.open(meanAnomalyFilePath, 'w')
# end of if mvar in ncfiledic:
print "Collecting model mean analysis data of variable '%s' for the \
%s month %s year" % (mvar, month, year)
# generate the mean analysis nc file name
analysisFileName = pFileName + '_mean_analysis.nc'
# open the monthly mean analysis nc file
analysisFile = cdms2.open(meanAnalysisPath + '/' + analysisFileName, 'r')
try:
# get the model data for this month alone (monthly mean analysis)
meanAnalysis = analysisFile(mvar, time = modelDataTimeRange[0])
except:
print "Coundn't get the analysis data of %s var %s time" \
% (mvar, modelDataTimeRange[0])
print "So skipping the anomaly and mean analysis processes\
for the '%s' variable" % mvar
continue
finally:
analysisFile.close()
# end of try:
# extracting units from the title of the data
title = meanAnalysis.title
stitle = title.find('[')
etitle = title.find(']')
if stitle!= -1 and etitle!= -1:
varunits = title[stitle + 1: etitle]
else:
varunits = ''
# store mean analysis into proper nc file
modelLevel = meanAnalysis.getLevel()
modelLatitude = meanAnalysis.getLatitude()
modelLongitude = meanAnalysis.getLongitude()
print "\n monthly time axis has been copied from the mean analysis"
meanTime = meanAnalysis.getTime()
# generate the climatology file name
climatologyFile = cvar + climPFileName
cfile = cdms2.open(climRegridPath + '/' + climatologyFile, 'r')
climDataTimeRange = timobj.monthFirstLast(monthdate[0], climatologyYear)
try:
# get the climatolgy data for this month alone
meanClimatology = cfile(cvar, time = climDataTimeRange[0])
except:
print "Coundn't get the climatolgy data for the variable %s and \
time %s " % (cvar, climDataTimeRange[0])
print "So skipping anomaly for the variable %s" % mvar
continue
finally:
cfile.close()
# end of try:
print "Calculating Monthly Anomaly"
# anomaly
anomaly = meanAnalysis - meanClimatology
# make memory free
del meanAnalysis, meanClimatology
# setting model time axis to the anomaly
if modelLevel:
anomaly.setAxisList([meanTime, modelLevel, modelLatitude,
modelLongitude])
else:
anomaly.setAxisList([meanTime, modelLatitude, modelLongitude])
anomaly.id = mvar
anomaly.units = varunits
anomaly.long_name = title
anomaly.comments = 'monthly mean anomaly of %s model data of %s' % (modelName, year)
print "Writing mean anomaly into %s file \n" % (anomalyFileName)
anomalyFile.write(anomaly)
anomalyFile.close()
# make memory free
del anomaly
# end of for globalvar in totalvars.itervalues():
# end of def genMonthAnomalyFiles(...):
if __name__ == '__main__':
    # report how many model/climatology pairs were read from the config
    if len(models) == len(climatologies) == 1:
        print "Obtained one model and one climatolgy"
    elif len(models) == len(climatologies):
        print "Obtained %d models and climatologies" % len(models)
    else:
        print "Obtained %d models and %d climatologies" % (len(models),
                                                           len(climatologies))
    for model in models:
        for climatolgy in climatologies:
            # pair each model with the climatology carrying the same count
            if model.count == climatolgy.count:
                # generate the climatolgy regrid path which has already
                # created
                climatologyRegridPath = os.path.join(processfilesPath,
                        model.name, 'Regrid', 'Climatology', climatolgy.name)
                if climatolgy.mfile:
                    # calling the genMonthAnomalyDirs function to do process
                    genMonthAnomalyDirs(model.name, model.path, climatologyRegridPath,
                                        climatolgy.mfile, climatolgy.year)
                else:
                    print "In configure.txt climpartialmonfile not mentioned. \
                           So can not compute monthly anomaly."
            else:
                pass
                # climatolgy configuration and model data configuration are
                # not equal in the text file handle this case, in diff manner.
                # The same loop should works.
                # But need to check all the cases.
    # end of for model in models:
    print "Done! Creation of Monthly Anomaly netCdf Files"
# end of if __name__ == '__main__':
|
arulalant/mmDiagnosis
|
diagnosis1/diagnosis/compute_month_anomaly.py
|
Python
|
gpl-3.0
| 16,848
|
[
"NetCDF"
] |
b93f5125d8d60057c304f0591c8039f1aaa13c2060a2ef3d721df0360561e554
|
"""
Extract Surface
~~~~~~~~~~~~~~~
You can extract the surface of nearly any object within ``pyvista``
using the ``extract_surface`` filter.
"""
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import pyvista as pv
from vtk import VTK_QUADRATIC_HEXAHEDRON
###############################################################################
# Create a quadratic cell and extract its surface
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we create a single quadratic hexahedral cell and then extract its surface
# to demonstrate how to extract the surface of an UnstructuredGrid.
# the 8 corner points of a unit-ish hexahedron
lin_pts = np.array([[-1, -1, -1],  # point 0
                    [ 1, -1, -1],  # point 1
                    [ 1,  1, -1],  # point 2
                    [-1,  1, -1],  # point 3
                    [-1, -1,  1],  # point 4
                    [ 1, -1,  1],  # point 5
                    [ 1,  1,  1],  # point 6
                    [-1,  1,  1]], np.double)  # point 7
# these are the "midside" points of a quad cell. See the definition of a
# vtkQuadraticHexahedron at:
# https://vtk.org/doc/nightly/html/classvtkQuadraticHexahedron.html
quad_pts = np.array([
    (lin_pts[1] + lin_pts[0])/2,  # between point 0 and 1
    (lin_pts[1] + lin_pts[2])/2,  # between point 1 and 2
    (lin_pts[2] + lin_pts[3])/2,  # and so on...
    (lin_pts[3] + lin_pts[0])/2,
    (lin_pts[4] + lin_pts[5])/2,
    (lin_pts[5] + lin_pts[6])/2,
    (lin_pts[6] + lin_pts[7])/2,
    (lin_pts[7] + lin_pts[4])/2,
    (lin_pts[0] + lin_pts[4])/2,
    (lin_pts[1] + lin_pts[5])/2,
    (lin_pts[2] + lin_pts[6])/2,
    (lin_pts[3] + lin_pts[7])/2])
# introduce a minor variation to the location of the mid-side points
# (unseeded np.random: the exact surface differs between runs)
quad_pts += np.random.random(quad_pts.shape)*0.3
pts = np.vstack((lin_pts, quad_pts))
# create the grid
# If you are using vtk>=9, you do not need the offset array
offset = np.array([0])
# cell connectivity: leading 20 is the point count of the quadratic hex
cells = np.hstack((20, np.arange(20))).astype(np.int64, copy=False)
celltypes = np.array([VTK_QUADRATIC_HEXAHEDRON])
grid = pv.UnstructuredGrid(offset, cells, celltypes, pts)
# finally, extract the surface and plot it
surf = grid.extract_surface()
surf.plot(show_scalar_bar=False)
###############################################################################
# Nonlinear Surface Subdivision
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Should your UnstructuredGrid contain quadratic cells, you can
# generate a smooth surface based on the position of the
# "mid-edge" nodes.  This allows the plotting of cells
# containing curvature.  For additional reference, please see:
# https://prod.sandia.gov/techlib-noauth/access-control.cgi/2004/041617.pdf
surf_subdivided = grid.extract_surface(nonlinear_subdivision=5)
surf_subdivided.plot(show_scalar_bar=False)
|
akaszynski/vtkInterface
|
examples/01-filter/extract-surface.py
|
Python
|
mit
| 2,740
|
[
"VTK"
] |
ab410f0ae8209bf6d506c3a86be4684e0608af2fa192c4785e5fafa0978ea044
|
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
    """
    Compute a dict that is an inverted index of
    tokenNames (which maps int token types to names):
    each name maps back to its integer token type.

    Returns an empty dict when tokenNames is None.
    """
    mapping = {}
    if tokenNames is not None:
        for ttype, name in enumerate(tokenNames):
            mapping[name] = ttype
    return mapping
## token types for pattern parser
EOF = -1      # end of pattern input
BEGIN = 1     # '(' -- start of a subtree
END = 2       # ')' -- end of a subtree
ID = 3        # identifier / token name
ARG = 4       # '[...]' text argument (lexer returns the inner text)
PERCENT = 5   # '%' -- label prefix
COLON = 6     # ':' -- label separator
DOT = 7       # '.' -- wildcard node
class TreePatternLexer(object):
    """Hand-written character-level lexer for tree patterns like "(A B C)".

    Produces the token-type constants defined above (BEGIN, END, ID, ARG,
    PERCENT, COLON, DOT, EOF); the matched text for ID and ARG tokens is
    left in self.sval.
    """

    def __init__(self, pattern):
        ## The tree pattern to lex like "(A B C)"
        self.pattern = pattern

        ## Index into input string
        self.p = -1

        ## Current char
        self.c = None

        ## How long is the pattern in char?
        self.n = len(pattern)

        ## Set when token type is ID or ARG
        self.sval = None

        ## Set to True on any unexpected character
        self.error = False

        # prime self.c with the first character (or EOF for "")
        self.consume()

    __idStartChar = frozenset(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
    )
    __idChar = __idStartChar | frozenset('0123456789')

    def nextToken(self):
        """Return the next token type, accumulating its text in self.sval."""
        self.sval = ""

        while self.c != EOF:
            # skip whitespace between tokens
            if self.c in (' ', '\n', '\r', '\t'):
                self.consume()
                continue

            # identifier: [a-zA-Z_][a-zA-Z0-9_]*
            if self.c in self.__idStartChar:
                self.sval += self.c
                self.consume()
                while self.c in self.__idChar:
                    self.sval += self.c
                    self.consume()

                return ID

            if self.c == '(':
                self.consume()
                return BEGIN

            if self.c == ')':
                self.consume()
                return END

            if self.c == '%':
                self.consume()
                return PERCENT

            if self.c == ':':
                self.consume()
                return COLON

            if self.c == '.':
                self.consume()
                return DOT

            if self.c == '[': # grab [x] as a string, returning x
                self.consume()
                while self.c != ']':
                    if self.c == '\\':
                        # backslash escapes ']' only; any other backslash
                        # sequence keeps the backslash literally
                        self.consume()
                        if self.c != ']':
                            self.sval += '\\'

                        self.sval += self.c

                    else:
                        self.sval += self.c

                    self.consume()

                self.consume()
                return ARG

            # unexpected character: flag the error and stop lexing
            self.consume()
            self.error = True
            return EOF

        return EOF

    def consume(self):
        """Advance to the next character, or EOF past the end of the pattern."""
        self.p += 1
        if self.p >= self.n:
            self.c = EOF

        else:
            self.c = self.pattern[self.p]
class TreePatternParser(object):
    """Recursive-descent parser over TreePatternLexer tokens.

    Builds tree nodes through the supplied adaptor; every parse method
    returns None on a syntax error instead of raising.
    """

    def __init__(self, tokenizer, wizard, adaptor):
        self.tokenizer = tokenizer
        self.wizard = wizard
        self.adaptor = adaptor
        self.ttype = tokenizer.nextToken() # kickstart

    def pattern(self):
        """Parse a whole pattern: either a (tree ...) or a single node."""
        if self.ttype == BEGIN:
            return self.parseTree()

        elif self.ttype == ID:
            node = self.parseNode()
            if self.ttype == EOF:
                return node

            return None # extra junk on end

        return None

    def parseTree(self):
        """Parse '(' root child* ')' and return the root node."""
        if self.ttype != BEGIN:
            return None

        self.ttype = self.tokenizer.nextToken()
        root = self.parseNode()
        if root is None:
            return None

        # children: nested trees or plain/labelled/wildcard nodes
        while self.ttype in (BEGIN, ID, PERCENT, DOT):
            if self.ttype == BEGIN:
                subtree = self.parseTree()
                self.adaptor.addChild(root, subtree)

            else:
                child = self.parseNode()
                if child is None:
                    return None

                self.adaptor.addChild(root, child)

        if self.ttype != END:
            return None

        self.ttype = self.tokenizer.nextToken()
        return root

    def parseNode(self):
        """Parse one node: optional %label: prefix, then '.', 'nil', ID or ID[arg]."""
        # "%label:" prefix
        label = None
        if self.ttype == PERCENT:
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != ID:
                return None

            label = self.tokenizer.sval
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != COLON:
                return None

            self.ttype = self.tokenizer.nextToken() # move to ID following colon

        # Wildcard?
        if self.ttype == DOT:
            self.ttype = self.tokenizer.nextToken()
            wildcardPayload = CommonToken(0, ".")
            node = WildcardTreePattern(wildcardPayload)
            if label is not None:
                node.label = label
            return node

        # "ID" or "ID[arg]"
        if self.ttype != ID:
            return None

        tokenName = self.tokenizer.sval
        self.ttype = self.tokenizer.nextToken()

        if tokenName == "nil":
            return self.adaptor.nil()

        text = tokenName
        # check for arg
        arg = None
        if self.ttype == ARG:
            arg = self.tokenizer.sval
            text = arg
            self.ttype = self.tokenizer.nextToken()

        # create node: the wizard maps the token name to its integer type
        treeNodeType = self.wizard.getTokenType(tokenName)
        if treeNodeType == INVALID_TOKEN_TYPE:
            return None

        node = self.adaptor.createFromType(treeNodeType, text)

        # label/arg flags only apply to TreePattern nodes (not e.g. nil)
        if label is not None and isinstance(node, TreePattern):
            node.label = label

        if arg is not None and isinstance(node, TreePattern):
            node.hasTextArg = True

        return node
class TreePattern(CommonTree):
    """
    When using %label:TOKENNAME in a tree for parse(), we must
    track the label.
    """

    def __init__(self, payload):
        CommonTree.__init__(self, payload)
        # label parsed from a %label: prefix, or None
        self.label = None
        # set to True when the pattern carried an [arg] text argument
        self.hasTextArg = None

    def toString(self):
        # prepend the %label: prefix when a label was attached
        prefix = ''
        if self.label is not None:
            prefix = '%' + self.label + ':'
        return prefix + CommonTree.toString(self)
class WildcardTreePattern(TreePattern):
    """Pattern node for the '.' wildcard, which matches any single tree node."""
    pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
    """This adaptor creates TreePattern objects for use during scan()"""

    def createWithPayload(self, payload):
        # build TreePattern nodes (instead of plain CommonTree) so labels
        # and text-arg flags can be attached during pattern parsing
        return TreePattern(payload)
class TreeWizard(object):
"""
Build and navigate trees with this object. Must know about the names
of tokens so you have to pass in a map or array of token names (from which
this class can build the map). I.e., Token DECL means nothing unless the
class can translate it to a token type.
In order to create nodes and navigate, this class needs a TreeAdaptor.
This class can build a token type -> node index for repeated use or for
iterating over the various nodes with a particular type.
This class works in conjunction with the TreeAdaptor rather than moving
all this functionality into the adaptor. An adaptor helps build and
navigate trees using methods. This class helps you do it with string
patterns like "(A B C)". You can create a tree from that pattern or
match subtrees against it.
"""
def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
    # adaptor builds and navigates the actual tree nodes
    self.adaptor = adaptor
    # Either derive the name->type map from the token-name list, or take a
    # caller-supplied map directly -- but never both.
    if typeMap is None:
        self.tokenNameToTypeMap = computeTokenTypes(tokenNames)

    else:
        if tokenNames is not None:
            raise ValueError("Can't have both tokenNames and typeMap")

        self.tokenNameToTypeMap = typeMap
def getTokenType(self, tokenName):
    """Using the map of token names to token types, return the type."""
    # unknown names map to INVALID_TOKEN_TYPE rather than raising
    return self.tokenNameToTypeMap.get(tokenName, INVALID_TOKEN_TYPE)
def create(self, pattern):
    """
    Create a tree or node from the indicated tree pattern that closely
    follows ANTLR tree grammar tree element syntax:

        (root child1 ... child2).

    You can also just pass in a node: ID

    Any node can have a text argument: ID[foo]
    (notice there are no quotes around foo--it's clear it's a string).

    nil is a special name meaning "give me a nil node". Useful for
    making lists: (nil A B C) is a list of A B C.
    """
    # lex + parse the pattern, building nodes through this wizard's adaptor
    tokenizer = TreePatternLexer(pattern)
    parser = TreePatternParser(tokenizer, self, self.adaptor)
    return parser.pattern()
def index(self, tree):
    """Walk the entire tree and make a node name to nodes mapping.

    For now, use recursion but later nonrecursive version may be
    more efficient. Returns a dict int -> list where the list is
    of your AST node type. The int is the token type of the node.
    """
    m = {}
    # _index does the recursive work, filling m in place
    self._index(tree, m)
    return m
def _index(self, t, m):
    """Recursive helper for index(): record t under its token type, then recurse."""
    if t is None:
        return

    # append t to the bucket for its token type, creating it on first use
    nodeType = self.adaptor.getType(t)
    m.setdefault(nodeType, []).append(t)

    # depth-first over all children
    for childIndex in range(self.adaptor.getChildCount(t)):
        self._index(self.adaptor.getChild(t, childIndex), m)
def find(self, tree, what):
    """Return a list of matching token.

    what may either be an integer specifying the token type to find or
    a string with a pattern that must be matched.
    """
    # dispatch on the type of 'what' (Python 2: long/basestring)
    if isinstance(what, (int, long)):
        return self._findTokenType(tree, what)

    elif isinstance(what, basestring):
        return self._findPattern(tree, what)

    else:
        raise TypeError("'what' must be string or integer")
def _findTokenType(self, t, ttype):
    """Return a List of tree nodes with token type ttype"""
    found = []

    def collect(node, parent, childIndex, labels):
        # visitor callback: just accumulate every matching node
        found.append(node)

    self.visit(t, ttype, collect)

    return found
def _findPattern(self, t, pattern):
    """Return a List of subtrees matching pattern."""
    subtrees = []

    # Create a TreePattern from the pattern
    tokenizer = TreePatternLexer(pattern)
    parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
    tpattern = parser.pattern()

    # don't allow invalid patterns
    if (tpattern is None or tpattern.isNil()
        or isinstance(tpattern, WildcardTreePattern)):
        return None

    rootTokenType = tpattern.getType()

    def visitor(tree, parent, childIndex, label):
        # collect only subtrees whose full structure matches the pattern
        if self._parse(tree, tpattern, None):
            subtrees.append(tree)

    self.visit(t, rootTokenType, visitor)

    return subtrees
    def visit(self, tree, what, visitor):
        """Visit every node in tree matching what, invoking the visitor.

        If what is a string, it is parsed as a pattern and only matching
        subtrees will be visited.
        The implementation uses the root node of the pattern in combination
        with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
        Patterns with wildcard roots are also not allowed.

        If what is an integer, it is used as a token type and visit will match
        all nodes of that type (this is faster than the pattern match).
        The labels arg of the visitor action method is never set (it's None)
        since using a token type rather than a pattern doesn't let us set a
        label.
        """
        # Dispatch on the type of 'what'.  NOTE: 'long'/'basestring' make
        # this Python 2 only.
        if isinstance(what, (int, long)):
            self._visitType(tree, None, 0, what, visitor)
        elif isinstance(what, basestring):
            self._visitPattern(tree, what, visitor)
        else:
            raise TypeError("'what' must be string or integer")
def _visitType(self, t, parent, childIndex, ttype, visitor):
"""Do the recursive work for visit"""
if t is None:
return
if self.adaptor.getType(t) == ttype:
visitor(t, parent, childIndex, None)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._visitType(child, t, i, ttype, visitor)
def _visitPattern(self, tree, pattern, visitor):
"""
For all subtrees that match the pattern, execute the visit action.
"""
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return
rootTokenType = tpattern.getType()
def rootvisitor(tree, parent, childIndex, labels):
labels = {}
if self._parse(tree, tpattern, labels):
visitor(tree, parent, childIndex, labels)
self.visit(tree, rootTokenType, rootvisitor)
def parse(self, t, pattern, labels=None):
"""
Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
on the various nodes and '.' (dot) as the node/subtree wildcard,
return true if the pattern matches and fill the labels Map with
the labels pointing at the appropriate nodes. Return false if
the pattern is malformed or the tree does not match.
If a node specifies a text arg in pattern, then that must match
for that node in t.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
return self._parse(t, tpattern, labels)
def _parse(self, t1, t2, labels):
"""
Do the work for parse. Check to see if the t2 pattern fits the
structure and token types in t1. Check text if the pattern has
text arguments on nodes. Fill labels map with pointers to nodes
in tree matched against nodes in pattern with labels.
"""
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots (wildcard matches anything)
if not isinstance(t2, WildcardTreePattern):
if self.adaptor.getType(t1) != t2.getType():
return False
if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
return False
if t2.label is not None and labels is not None:
# map label in pattern to node in t1
labels[t2.label] = t1
# check children
n1 = self.adaptor.getChildCount(t1)
n2 = t2.getChildCount()
if n1 != n2:
return False
for i in range(n1):
child1 = self.adaptor.getChild(t1, i)
child2 = t2.getChild(i)
if not self._parse(child1, child2, labels):
return False
return True
def equals(self, t1, t2, adaptor=None):
"""
Compare t1 and t2; return true if token types/text, structure match
exactly.
The trees are examined in their entirety so that (A B) does not match
(A B C) nor (A (B C)).
"""
if adaptor is None:
adaptor = self.adaptor
return self._equals(t1, t2, adaptor)
def _equals(self, t1, t2, adaptor):
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots
if adaptor.getType(t1) != adaptor.getType(t2):
return False
if adaptor.getText(t1) != adaptor.getText(t2):
return False
# check children
n1 = adaptor.getChildCount(t1)
n2 = adaptor.getChildCount(t2)
if n1 != n2:
return False
for i in range(n1):
child1 = adaptor.getChild(t1, i)
child2 = adaptor.getChild(t2, i)
if not self._equals(child1, child2, adaptor):
return False
return True
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/antlr3/antlr3/treewizard.py
|
Python
|
bsd-3-clause
| 18,202
|
[
"VisIt"
] |
288e596f72c11952e10eb78743d2ed7ae0a5008797fade93495f26cd33c02c27
|
""" Create and send a combined request for any pending operations at
the end of a job:
* fileReport (for the transformation)
* jobReport (for jobs)
* accounting
* request (for failover)
"""
from DIRAC import gLogger
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase, GracefulTermination
class FailoverRequest( ModuleBase ):
  """ Workflow module that, at the end of a job, reports the final status of
      the input files to the TransformationDB and generates the failover
      request file (file report, job report, accounting, failover request).
  """

  #############################################################################
  def __init__( self ):
    """Module initialization.
    """
    self.log = gLogger.getSubLogger( "FailoverRequest" )
    super( FailoverRequest, self ).__init__( self.log )

  #############################################################################
  def _resolveInputVariables( self ):
    """ By convention the module input parameters are resolved here.
    """
    super( FailoverRequest, self )._resolveInputVariables()
    super( FailoverRequest, self )._resolveInputStep()

  def _initialize( self ):
    """ Checks if the module is enabled, then names the failover request
        objects after this job.

        :raises GracefulTermination: when the module is disabled.
    """
    if not self._enableModule():
      raise GracefulTermination( "Skipping FailoverRequest module" )

    self.request.RequestName = 'job_%d_request.xml' % self.jobID
    self.request.JobID = self.jobID
    self.request.SourceComponent = "Job_%d" % self.jobID

  def _execute( self ):
    """ Report the input files' final status to the TransformationDB
        ('Processed' on success, 'Unused' on workflow failure), falling back
        to a forward-DISET operation in the request when the commit fails,
        then generate the failover file.
    """
    # report on the status of the input data, by default they are 'Processed', unless the job failed
    # failures happening before are not touched
    filesInFileReport = self.fileReport.getFiles()
    if not self._checkWFAndStepStatus( noPrint = True ):
      for lfn in self.inputDataList:
        if lfn not in filesInFileReport:
          self.log.info( "Forcing status to 'Unused' due to workflow failure for: %s" % ( lfn ) )
          # Set the force flag in case the file was in a terminal status
          self.fileReport.force = True
          self.fileReport.setFileStatus( int( self.production_id ), lfn, 'Unused' )
    else:
      filesInFileReport = self.fileReport.getFiles()
      for lfn in self.inputDataList:
        if lfn not in filesInFileReport:
          self.log.verbose( "No status populated for input data %s, setting to 'Processed'" % lfn )
          self.fileReport.setFileStatus( int( self.production_id ), lfn, 'Processed' )

    result = self.fileReport.commit()
    if not result['OK']:
      self.log.error( "Failed to report file status to TransformationDB, trying again before populating request with file report information" )
      result = self.fileReport.generateForwardDISET()
      if not result['OK']:
        # BUGFIX: an S_ERROR dict carries 'Message', not 'Value'; indexing
        # 'Value' here raised KeyError instead of logging the failure.
        self.log.warn( "Could not generate Operation for file report with result:\n%s" % ( result['Message'] ) )
      else:
        if result['Value'] is None:
          self.log.info( "Files correctly reported to TransformationDB" )
        else:
          result = self.request.addOperation( result['Value'] )
    else:
      self.log.info( "Status of files have been properly updated in the TransformationDB" )

    # Must ensure that the local job report instance is used to report the final status
    # in case of failure and a subsequent failover operation
    if self.workflowStatus['OK'] and self.stepStatus['OK']:
      self.setApplicationStatus( "Job Finished Successfully" )

    self.generateFailoverFile()

  def _finalize( self ):
    """ Finalize and report correct status for the workflow based on the workflow
        or step status.
    """
    if not self._checkWFAndStepStatus( True ):
      raise RuntimeError( "Workflow failed, FailoverRequest module completed" )
    super( FailoverRequest, self )._finalize( "Workflow successful, end of FailoverRequest module execution." )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
arrabito/DIRAC
|
Workflow/Modules/FailoverRequest.py
|
Python
|
gpl-3.0
| 3,764
|
[
"DIRAC"
] |
fa9dfc1a2cfd63a8f97976364256017086b64649344c376c4ec0ce0d682df97a
|
"""
:mod:`bein` -- LIMS and workflow manager for bioinformatics
===========================================================
.. module:: bein
:platform: Unix
:synopsis: Workflow and provenance manager for bioinformatics
.. moduleauthor:: BBCF <webmaster.bbcf@epfl.ch>
Bein contains a miniature LIMS (Laboratory Information Management
System) and a workflow manager. It was written for the Bioinformatics
and Biostatistics Core Facility of the Ecole Polytechnique Federale de
Lausanne. It is aimed at processes just complicated enough where the
Unix shell becomes problematic, but not so large as to justify all the
machinery of big workflow managers like KNIME or Galaxy.
This module contains all the core logic and functionality of bein.
There are three core classes you need to understand:
execution
The actual class is Execution, but it is generally created with the
execution contextmanager. An execution tracks all the information
about a run of a given set of programs. It corresponds roughly to a
script in shell.
MiniLIMS
MiniLIMS represents a database and a directory of files. The database
stores metainformation about the files and records all executions run
with this MiniLIMS. You can go back and examine the return code, stdout,
stderr, imported files, etc. from any execution.
program
The @program decorator provides a very simple way to bind external
programs into bein for use in executions.
"""
import subprocess
import random
import traceback
import string
import sys
import os
import sqlite3
import time
import shutil
import threading
from contextlib import contextmanager
__version__ = '1.1.0'
################################################################################
class ProgramOutput(object):
    """Record of a finished external program run.

    Instances are handed to the ``return_value`` callables of programs
    bound with ``@program``; they bundle everything bein knows about the
    run: the exit code, the process id, the argument list executed, and
    the captured stdout/stderr line lists (None when the stream was
    redirected to a file instead of captured).
    """
    def __init__(self, return_code, pid, arguments, stdout, stderr):
        # Full argument list, e.g. ["touch", "myfile"].
        self.arguments = arguments
        # Process id the program ran under.
        self.pid = pid
        # Exit status of the process (0 means success).
        self.return_code = return_code
        # Captured output streams as lists of lines, or None.
        self.stdout = stdout
        self.stderr = stderr
################################################################################
class ProgramFailed(Exception):
    """Raised when a program bound by ``@program`` exits with a nonzero code."""
    def __init__(self, output):
        # The ProgramOutput describing the failed run.
        self.output = output
    def __str__(self):
        out = self.output
        pieces = ["Running '%s' failed with " % " ".join(out.arguments)]
        if out.stdout:
            pieces.append("stdout:\n%s" % "".join(out.stdout))
        if out.stderr:
            pieces.append("stderr:\n%s" % "".join(out.stderr))
        return "".join(pieces)
################################################################################
def unique_filename_in(path=None):
    """Return a random filename unique in the given path.

    The filename returned is twenty alphanumeric characters which are
    not already serving as a filename (or filename prefix) in *path*.
    If *path* is omitted, it defaults to the current working directory.
    """
    if path is None:
        path = os.getcwd()
    def random_string():
        # string.ascii_letters rather than the locale-dependent (and
        # Python 3-removed) string.letters.
        return "".join([random.choice(string.ascii_letters + string.digits)
                        for x in range(20)])
    while True:
        filename = random_string()
        # Accept the candidate only if no existing entry starts with it
        # (associated files are stored as "<name><suffix>").
        if not any(f.startswith(filename) for f in os.listdir(path)):
            return filename
################################################################################
class Execution(object):
    """``Execution`` objects hold the state of a current running execution.

    You should generally use the execution function below to create an
    Execution, since it sets up the working directory properly.

    Executions are run against a particular MiniLIMS object where it
    records all the information of programs that were run during it,
    fetches files from it, and writes files back to it.

    The important methods for the user to know are ``add`` and ``use``.
    Everything else is used internally by bein.  ``add`` puts a file
    into the LIMS repository from the execution's working directory.
    ``use`` fetches a file from the LIMS repository into the working
    directory.
    """
    def __init__(self, lims, working_directory):
        # MiniLIMS this execution is attached to (may be None).
        self.lims = lims
        # Directory in which all programs of this execution run.
        self.working_directory = working_directory
        # ProgramOutput objects recorded via report().
        self.programs = []
        # Files queued by add() for import into the LIMS at the end.
        self.files = []
        # Ids of LIMS files fetched with use().
        self.used_files = []
        # Start time as a Unix timestamp; finished_at set by finish().
        self.started_at = int(time.time())
        self.finished_at = None
        # Execution id in the LIMS; None until the execution is written.
        self.id = None
    def path_to_file(self, id_or_alias):
        """Fetch the path to *id_or_alias* in the attached LIMS.

        Raises ValueError when this execution has no attached LIMS.
        """
        if self.lims == None:
            raise ValueError("Cannot use path_to_file; no attached LIMS.")
        else:
            return self.lims.path_to_file(id_or_alias)
    def report(self, program):
        """Add a ProgramOutput object to the execution.

        When the Execution finishes, all programs added to the
        Execution with 'report', in the order they were added, are
        written into the MiniLIMS repository.
        """
        self.programs.append(program)
    def add(self, filename, description="", associate_to_id=None,
            associate_to_filename=None, template=None, alias=None):
        """Add a file to the MiniLIMS object from this execution.

        filename is the name of the file in the execution's working
        directory to import.  description is an optional argument to
        assign a string or a dictionary to describe that file in the
        MiniLIMS repository.

        Note that the file is not actually added to the repository
        until the execution finishes.
        """
        # Dict descriptions are stored stringified.
        if isinstance(description,dict): description=str(description)
        if filename == None:
            if description == "":
                raise(IOError("Tried to add None to repository."))
            else:
                raise(IOError("Tried to add None to repository, with description '" + description +"' ."))
        elif not(os.path.exists(filename)):
            raise IOError("No such file or directory: '"+filename+"'")
        else:
            # Deferred: the tuple is consumed when the execution is
            # written back to the LIMS.
            self.files.append((filename,description,associate_to_id,
                               associate_to_filename,template,alias))
    def finish(self):
        """Set the time when the execution finished."""
        self.finished_at = int(time.time())
    def use(self, file_or_alias):
        """Fetch a file from the MiniLIMS repository.

        file_or_alias should be an integer assigned to a file in the
        MiniLIMS repository, or a string giving a file alias in the
        MiniLIMS repository.  The file is copied into the execution's
        working directory with a unique filename.  'use' returns the
        unique filename it copied the file into.
        """
        fileid = self.lims.resolve_alias(file_or_alias)
        try:
            # exportfile is a SQL function registered by MiniLIMS that
            # copies the repository file into the given directory and
            # yields the name it was copied to.
            filename = [x for (x,) in
                        self.lims.db.execute("select exportfile(?,?)",
                                             (fileid, self.working_directory))][0]
            # Export associated files too, naming each from its
            # template applied to the primary file's new name.
            for (f,t) in self.lims.associated_files_of(fileid):
                self.lims.db.execute("select exportfile(?,?)",
                                     (f, os.path.join(self.working_directory,t % filename)))
            self.used_files.append(fileid)
            return filename
        except ValueError, v:
            raise ValueError("Tried to use a nonexistent file id " + str(fileid))
################################################################################
@contextmanager
def execution(lims = None, description="", remote_working_directory=None):
    """Create an ``Execution`` connected to the given MiniLIMS object.

    ``execution`` is a ``contextmanager``, so it can be used in a ``with``
    statement, as in::

        with execution(mylims) as ex:
            touch('boris')

    It creates a temporary directory where the execution will work,
    sets up the ``Execution`` object, then runs the body of the
    ``with`` statement.  After the body finished, or if it fails and
    throws an exception, ``execution`` writes the ``Execution`` to the
    MiniLIMS repository and deletes the temporary directory after all
    is finished.

    The ``Execution`` has field ``id`` set to ``None`` during the
    ``with`` block, but afterwards ``id`` is set to the execution ID
    it ran as.  For example::

        with execution(mylims) as ex:
            pass
        print ex.id

    will print the execution ID the ``with`` block ran as.

    On some clusters, such as VITAL-IT in Lausanne, the path to the
    current directory is different on worker nodes where batch jobs
    run than on the nodes from which jobs are submitted.  If this is
    the case, pass the equivalent directory on worker nodes as
    *remote_working_directory*; the execution directory's name is
    appended to it to form the remote path.
    """
    cwd = os.getcwd()
    execution_dir = unique_filename_in(cwd)
    os.mkdir(os.path.join(cwd, execution_dir))
    ex = Execution(lims, os.path.join(cwd, execution_dir))
    if remote_working_directory is None:
        ex.remote_working_directory = ex.working_directory
    else:
        ex.remote_working_directory = os.path.join(remote_working_directory,
                                                   execution_dir)
    os.chdir(os.path.join(cwd, execution_dir))
    exception_string = None
    try:
        yield ex
    except:
        # Record the full traceback so it can be stored with the
        # execution, then let the exception propagate to the caller.
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        exception_string = ''.join(traceback.format_exception(exc_type, exc_value,
                                                              exc_traceback))
        raise
    finally:
        ex.finish()
        try:
            # Write the execution (and its exception, if any) to the
            # LIMS before tearing down the working directory.
            if lims is not None:
                ex.id = lims.write(ex, description, exception_string)
        finally:
            os.chdir("..")
            shutil.rmtree(ex.working_directory, ignore_errors=True)
################################################################################
class program(object):
    """Decorator to wrap external programs for use by bein.

    Bein depends on external programs to do most of its work.  In this
    sense, it's a strange version of a shell.  The ``@program`` decorator
    makes bindings to external programs only a couple lines long.

    To wrap a program, write a function that takes whatever arguments
    you will need to vary in calling the program (for instance, the
    filename for touch or the number of seconds to sleep for sleep).
    This function should return a dictionary containing two keys,
    ``'arguments'`` and ``'return_value'``.  ``'arguments'`` should
    point to a list of strings which is the actual command and
    arguments to be executed (``["touch",filename]`` for touch, for
    instance).  ``'return_value'`` should point to a value to return,
    or a callable object which takes a ProgramOutput object and returns
    the value that will be passed back to the user when this program is
    run.  For example, to wrap touch::

        @program
        def touch(filename):
            return {"arguments": ["touch",filename],
                    "return_value": filename}

    Once we have such a function, how do we call it?  We can call it
    directly, but ``@program`` inserts an additional argument at the
    beginning of the argument list to take the execution the program
    is run in.  Typically it will be run like::

        with execution(lims) as ex:
            touch(ex, "myfile")

    where ``lims`` is a MiniLIMS object.  The ProgramOutput of touch
    is automatically recorded to the execution ``ex`` and stored in the
    MiniLIMS.  The value returned by touch is ``"myfile"``, the name of
    the touched file.

    Often you want to call a function, but not block when it returns
    so you can run several in parallel.  ``@program`` also creates a
    method ``nonblocking`` which does this.  The return value is a
    Future object with a single method: ``wait()``.  When you call
    ``wait()``, it blocks until the program finishes, then returns the
    same value that you would get from calling the function directly.
    So to touch two files, and not block until both commands have
    started, you would write::

        with execution(lims) as ex:
            a = touch.nonblocking(ex, "myfile1")
            b = touch.nonblocking(ex, "myfile2")
            a.wait()
            b.wait()

    By default, ``nonblocking`` runs local processes, but you can
    control how it runs its processes with the ``via`` keyword
    argument.  For example, on systems using the LSF batch submission
    systems, you can run commands via batch submission by passing the
    ``via`` argument the value ``"lsf"``::

        with execution(lims) as ex:
            a = touch.nonblocking(ex, "myfile1", via="lsf")
            a.wait()

    You can force local execution with ``via="local"``.

    Some programs do not accept an output file as an argument and only
    write to ``stdout``.  Alternately, you might need to capture
    ``stderr`` to a file.  All the methods of ``@program`` accept
    keyword arguments ``stdout`` and ``stderr`` to specify files to
    write these streams to.  If they are omitted, then both streams
    are captured and returned in the ``ProgramOutput`` object.
    """
    def __init__(self, gen_args):
        # The user's argument-generating function; its docstring and
        # name are copied so the wrapper is introspectable.
        self.gen_args = gen_args
        self.__doc__ = gen_args.__doc__
        self.__name__ = gen_args.__name__
    def __call__(self, ex, *args, **kwargs):
        """Run a program locally, and block until it completes.

        This form takes one argument before those to the decorated
        function, an execution the program should be run as part of.
        The return_code, pid, stdout, stderr, and command arguments of
        the program are recorded to that execution, and thus to the
        MiniLIMS object.
        """
        if not(isinstance(ex,Execution)):
            raise ValueError("First argument to program " + self.gen_args.__name__ + " must be an Execution.")
        elif ex.id != None:
            raise SyntaxError("Program being called on an execution that has already terminated.")
        # Redirect stdout/stderr to files when requested; otherwise
        # capture them via pipes.  NOTE(review): file handles opened
        # here are never closed explicitly; they are left to GC.
        if kwargs.has_key('stdout'):
            stdout = open(kwargs['stdout'],'w')
            kwargs.pop('stdout')
        else:
            stdout = subprocess.PIPE
        if kwargs.has_key('stderr'):
            stderr = open(kwargs['stderr'],'w')
            kwargs.pop('stderr')
        else:
            stderr = subprocess.PIPE
        # LSF-only options are accepted and ignored for local runs so
        # the same call works with any 'via'.
        if 'memory' in kwargs: kwargs.pop('memory')
        if 'threads' in kwargs: kwargs.pop('threads')
        if 'queue' in kwargs: kwargs.pop('queue')
        d = self.gen_args(*args, **kwargs)
        try:
            sp = subprocess.Popen(d["arguments"], bufsize=-1, stdout=stdout,
                                  stderr=stderr,
                                  cwd = ex.working_directory)
        except OSError, ose:
            raise ValueError("Program %s does not seem to exist in your $PATH." % d['arguments'][0])
        return_code = sp.wait()
        # Streams redirected to files are reported as None (Python 2's
        # builtin 'file' type is used for the check).
        if isinstance(stdout,file):
            stdout_value = None
        else:
            stdout_value = sp.stdout.readlines()
        if isinstance(stderr,file):
            stderr_value = None
        else:
            stderr_value = sp.stderr.readlines()
        po = ProgramOutput(return_code, sp.pid,
                           d["arguments"],
                           stdout_value, stderr_value)
        ex.report(po)
        if return_code == 0:
            # Compute the user-visible return value from the output.
            z = d["return_value"]
            if callable(z):
                return z(po)
            else:
                return z
        else:
            raise ProgramFailed(po)
    def nonblocking(self, ex, *args, **kwargs):
        """Run a program, but return a Future object instead of blocking.

        Like __call__, nonblocking takes an Execution as an extra,
        initial argument before the arguments to the decorated
        function.  However, instead of blocking, it starts the program
        in a separate thread, and returns an object which lets the
        user choose when to wait for the program by calling its wait()
        method.  When wait() is called, the thread blocks, and the
        program is recorded in the execution and its value returned as
        if the user had called __call__ directly.  Thus::

            with execution(lims) as ex:
                f = touch(ex, "boris")

        is exactly equivalent to::

            with execution(lims) as ex:
                a = touch.nonblocking(ex, "boris")
                f = a.wait()

        All the methods are named as _method, with the same arguments
        as ``nonblocking``.  That is, the ``via="local"`` method is
        implemented by ``_local``, the ``via="lsf"`` method by
        ``_lsf``, etc.  When writing a new method, name it in the same
        way, and add a condition to the ``if`` statement in
        ``nonblocking``.

        If you need to pass a keyword argument ``via`` to your
        program, you will need to call one of the hidden methods
        (``_local`` or ``_lsf``) directly.

        Maximum memory allocation can be specified via the ``memory``
        argument, given in Gigabytes.

        The desired number of threads (multiple cores on a single
        cluster node) can be specified via the ``threads`` argument
        (equivalent to `bsub -n nthreads -R span[hosts=1]`).
        """
        if not(isinstance(ex,Execution)):
            raise ValueError("First argument to a program must be an Execution.")
        elif ex.id != None:
            raise SyntaxError("Program being called on an execution that has already terminated.")
        if kwargs.has_key('via'):
            via = kwargs['via']
            kwargs.pop('via')
        else:
            via = 'local'
        # Dispatch to the backend implementation.  NOTE(review): any
        # other 'via' value silently returns None.
        if via == 'local':
            return self._local(ex, *args, **kwargs)
        elif via == 'lsf':
            return self._lsf(ex, *args, **kwargs)
    def _local(self, ex, *args, **kwargs):
        """Method called by ``nonblocking`` for running locally.

        If you need to pass a ``via`` keyword argument to your
        function, you will have to call this method directly.
        """
        # Same stdout/stderr handling as __call__.
        if kwargs.has_key('stdout'):
            stdout = open(kwargs['stdout'],'w')
            kwargs.pop('stdout')
        else:
            stdout = subprocess.PIPE
        if kwargs.has_key('stderr'):
            stderr = open(kwargs['stderr'],'w')
            kwargs.pop('stderr')
        else:
            stderr = subprocess.PIPE
        # LSF-only options are ignored locally.
        if 'memory' in kwargs: kwargs.pop('memory')
        if 'threads' in kwargs: kwargs.pop('threads')
        if 'queue' in kwargs: kwargs.pop('queue')
        d = self.gen_args(*args, **kwargs)
        class Future(object):
            def __init__(self):
                self.program_output = None
                self.return_value = None
            def wait(self):
                # Block until the worker thread signals completion.
                v.wait()
                ex.report(self.program_output)
                # 'f' (closed over below) is this very instance, so
                # f.return_value and self.return_value are the same.
                if isinstance(f.return_value, Exception):
                    raise self.return_value
                else:
                    return self.return_value
        f = Future()
        v = threading.Event()
        def g():
            # Worker: run the program, fill in the Future, then signal.
            try:
                try:
                    sp = subprocess.Popen(d["arguments"], bufsize=-1,
                                          stdout=stdout,
                                          stderr=stderr,
                                          cwd = ex.working_directory)
                except OSError, ose:
                    raise ValueError("Program %s does not seem to exist in your $PATH." % d['arguments'][0])
                return_code = sp.wait()
                if isinstance(stdout,file):
                    stdout_value = None
                else:
                    stdout_value = sp.stdout.readlines()
                if isinstance(stderr,file):
                    stderr_value = None
                else:
                    stderr_value = sp.stderr.readlines()
                f.program_output = ProgramOutput(return_code, sp.pid,
                                                 d["arguments"],
                                                 stdout_value,
                                                 stderr_value)
                if return_code == 0:
                    z = d["return_value"]
                    if callable(z):
                        f.return_value = z(f.program_output)
                    else:
                        f.return_value = z
                v.set()
            except Exception, e:
                # Deliver the exception through wait() instead of
                # letting it die in the thread.
                f.return_value = e
                v.set()
        a = threading.Thread(target=g)
        a.start()
        return f
    def _lsf(self, ex, *args, **kwargs):
        """Method called by ``nonblocking`` to run via LSF."""
        if not(isinstance(ex,Execution)):
            raise ValueError("First argument to a program must be an Execution.")
        # Check if `bsub` exists in $PATH.
        bsub_found = None
        path = [os.path.join(s,'bsub') for s in os.environ["PATH"].split(os.pathsep)]
        for p in path:
            if (os.path.isfile(p) and os.access(p, os.X_OK)):
                bsub_found = True; break
        if not bsub_found:
            raise ValueError("bsub: command not found in PATH. Try via='local'.")
        # For LSF, stdout/stderr are always files on the shared
        # filesystem; generate names when the caller gave none, and
        # remember whether to read them back afterwards.
        if kwargs.has_key('stdout'):
            stdout = kwargs['stdout']
            kwargs.pop('stdout')
            load_stdout = False
        else:
            stdout = unique_filename_in(ex.working_directory)
            load_stdout = True
        if kwargs.has_key('stderr'):
            stderr = kwargs['stderr']
            kwargs.pop('stderr')
            load_stderr = False
        else:
            stderr = unique_filename_in(ex.working_directory)
            load_stderr = True
        threads = []
        if 'threads' in kwargs:
            threads = ['-n',str(kwargs['threads']),'-R','span[hosts=1]']
            kwargs.pop('threads')
        mem_opts = []
        if 'memory' in kwargs:
            gigabytes = int(kwargs['memory'])
            kwargs.pop('memory')
            mem_opts = ["-M",str(gigabytes*1000000),
                        "-R","rusage[mem=%i]" %(gigabytes*1000)]
        queue = ["-q",kwargs.pop("queue","normal")]
        d = self.gen_args(*args, **kwargs)
        # Jacques Rougemont figured out the following syntax that works in both bash and tcsh.
        remote_cmd = " ".join(d["arguments"])
        remote_cmd += " > "+stdout
        remote_cmd = " ( "+remote_cmd+" ) >& "+stderr
        # -K makes bsub block until the job completes; bsub's own
        # output is discarded.
        cmds = ["bsub","-cwd",ex.remote_working_directory,
                "-o","/dev/null","-e","/dev/null"]
        cmds += queue+mem_opts+threads+["-K","-r",remote_cmd]
        class Future(object):
            def __init__(self):
                self.program_output = None
                self.return_value = None
            def wait(self):
                # Block until the worker thread signals completion.
                v.wait()
                ex.report(self.program_output)
                # 'f' (closed over below) is this very instance.
                if isinstance(f.return_value, Exception):
                    raise self.return_value
                else:
                    return self.return_value
        f = Future()
        v = threading.Event()
        def g():
            try:
                nullout = open(os.path.devnull, 'w')
                sp = subprocess.Popen(cmds, bufsize=-1, stdout=nullout,
                                      stderr=nullout)
                return_code = sp.wait()
                # The output files may appear on the shared filesystem
                # some time after bsub returns; poll until they do.
                while not(os.path.exists(os.path.join(ex.working_directory,
                                                      stdout))):
                    time.sleep(10) # We need to wait until the files actually show up
                if load_stdout:
                    with open(os.path.join(ex.working_directory,stdout), 'r') as fo:
                        stdout_value = fo.readlines()
                else:
                    stdout_value = None
                while not(os.path.exists(os.path.join(ex.working_directory,stderr))):
                    time.sleep(10) # We need to wait until the files actually show up
                if load_stderr:
                    with open(os.path.join(ex.working_directory,stderr), 'r') as fe:
                        stderr_value = fe.readlines()
                else:
                    stderr_value = None
                # NOTE: the recorded arguments are the bsub command
                # line, not the bare program arguments.
                f.program_output = ProgramOutput(return_code, sp.pid,
                                                 cmds, stdout_value, stderr_value)
                if return_code == 0:
                    z = d["return_value"]
                    if callable(z):
                        f.return_value = z(f.program_output)
                    else:
                        f.return_value = z
                v.set()
            except Exception, e:
                f.return_value = e
                v.set()
                raise
        a = threading.Thread(target=g)
        a.start()
        return(f)
################################################################################
class MiniLIMS(object):
"""Encapsulates a database and directory to track executions and files.
A MiniLIMS repository consists of a SQLite database and a
directory of the same name with ``.files`` appended where all files
kept in the repository are stored. For example, if the SQLite
database is ``/home/boris/myminilims``, then there is a directory
``/home/boris/myminilims.files`` with all the corresponding files.
You should never edit the repository by hand!.
If you create a MiniLIMS object pointing to a nonexistent
database, then it creates the database and the file directory.
Basic file operations:
* :meth:`import_file`
* :meth:`export_file`
* :meth:`path_to_file`
* :meth:`copy_file`
Fetching files and executions:
* :meth:`fetch_file`
* :meth:`fetch_execution`
Deleting files and executions:
* :meth:`delete_file`
* :meth:`delete_execution`
Searching files and executions:
* :meth:`search_files`
* :meth:`search_executions`
* :meth:`browse_files`
* :meth:`browse_executions`
File aliases:
* :meth:`resolve_alias`
* :meth:`add_alias`
* :meth:`delete_alias`
File associations:
* :meth:`associate_file`
* :meth:`delete_file_association`
* :meth:`associated_files_of`
"""
    def __init__(self, path):
        """Open (or create) the MiniLIMS repository rooted at *path*.

        *path* is the SQLite database file; the managed files live next
        to it in ``path + ".files"``.  If that directory does not exist
        yet, the repository is assumed to be new: the database schema is
        created and the directory made.
        """
        self.db_path = path
        # check_same_thread=False: the handle is shared with worker
        # threads (see program.nonblocking).  timeout=6000 is in
        # seconds — presumably meant to be very generous; confirm.
        self.db = sqlite3.connect(path, check_same_thread=False,timeout=6000)
        self.file_path = os.path.abspath(path +".files")
        if not(os.path.exists(self.file_path)):
            self.initialize_database(self.db)
            os.mkdir(self.file_path)
        # SQL-callable helpers used from queries elsewhere, e.g.
        # "select exportfile(?,?)" in Execution.use.
        self.db.create_function("importfile",1,self._copy_file_to_repository)
        self.db.create_function("deletefile",1,self._delete_repository_file)
        self.db.create_function("exportfile",2,self._export_file_from_repository)
def remove(self):
"""Removes the MiniLIMS entierly."""
# The files #
shutil.rmtree(self.file_path)
# The SQLite file #
self.db.close()
os.remove(self.db_path)
def __repr__(self):
return '<%s object> from %s' % (self.__class__.__name__, self.file_path)
def initialize_database(self, db):
"""Sets up a new MiniLIMS database.
"""
self.db.execute("""
CREATE TABLE execution (
id integer primary key,
started_at integer not null,
finished_at integer default null,
working_directory text not null,
description text not null default '',
exception text default null
)""")
self.db.execute("""
CREATE TABLE program (
pos integer,
execution integer references execution(id),
pid integer not null,
stdin text default null,
return_code integer not null,
stdout text default null,
stderr text default null,
primary key (pos,execution)
)""")
self.db.execute("""
CREATE TABLE argument (
pos integer,
program integer references program(pos),
execution integer references program(execution),
argument text not null,
primary key (pos,program,execution)
)""")
self.db.execute("""
CREATE TABLE file (
id integer primary key autoincrement,
external_name text,
repository_name text,
created timestamp default current_timestamp,
description text not null default '',
origin text not null default 'execution',
origin_value integer default null
)""")
self.db.execute("""
CREATE TABLE execution_use (
execution integer references execution(id),
file integer references file(id)
)""")
self.db.execute("""
CREATE TABLE file_alias (
alias text primary key,
file integer references file(id)
)""")
self.db.execute("""
CREATE TABLE file_association (
id integer primary key,
fileid integer references file(id) not null,
associated_to integer references file(id) not null,
template text not null
)""")
self.db.execute("""
CREATE TRIGGER prevent_repository_name_change BEFORE UPDATE ON file
FOR EACH ROW WHEN (OLD.repository_name != NEW.repository_name) BEGIN
SELECT RAISE(FAIL, 'Cannot change the repository name of a file.');
END""")
self.db.execute("""
CREATE VIEW file_direct_immutability AS
SELECT file.id as id, count(execution) > 0 as immutable
from file left join execution_use
on file.id = execution_use.file
group by file.id
""")
self.db.execute("""
create view all_associations as
select file.id as id, file_association.associated_to as target
from file inner join file_association
on file.id = file_association.fileid
union all
select file.id as id, file.id as target
from file
order by id asc
""")
self.db.execute("""
create view file_immutability as
select aa.id as id, max(fdi.immutable) as immutable
from all_associations as aa left join file_direct_immutability as fdi
on aa.target = fdi.id
group by aa.id
""")
self.db.execute("""
CREATE VIEW execution_outputs AS
select execution.id as execution, file.id as file
from execution left join file
on execution.id = file.origin_value
and file.origin='execution'
""")
self.db.execute("""
CREATE VIEW execution_immutability AS
SELECT eo.execution as id, ifnull(max(fi.immutable),0) as immutable from
execution_outputs as eo left join file_immutability as fi
on eo.file = fi.id
group by id
""")
self.db.execute("""
CREATE TRIGGER prevent_file_delete BEFORE DELETE ON file
FOR EACH ROW WHEN
(SELECT immutable FROM file_immutability WHERE id = OLD.id) = 1
BEGIN
SELECT RAISE(FAIL, 'File is immutable; cannot delete it.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_argument_delete BEFORE DELETE ON argument
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.execution) = 1
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot delete argument.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_argument_update BEFORE UPDATE ON argument
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.execution) = 1
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot update command arguments.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_command_delete BEFORE DELETE ON program
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.execution) = 1
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot delete command.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_command_update BEFORE UPDATE ON program
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.execution) = 1
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot update commands.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_execution_delete BEFORE DELETE ON execution
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.id) = 1
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot delete.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_execution_update BEFORE UPDATE ON execution
FOR EACH ROW WHEN
(SELECT immutable FROM execution_immutability WHERE id = OLD.id) = 1 AND
(OLD.id != NEW.id OR OLD.started_at != NEW.started_at OR OLD.finished_at != NEW.finished_at
OR OLD.temp_dir != NEW.temp_dir)
BEGIN
SELECT RAISE(FAIL, 'Execution is immutable; cannot update anything but description.');
END
""")
self.db.execute("""
CREATE TRIGGER prevent_immutable_file_update BEFORE UPDATE on file
FOR EACH ROW WHEN
(SELECT immutable FROM file_immutability WHERE id = old.id) = 1 AND
(OLD.id != NEW.id OR OLD.external_name != NEW.external_name OR
OLD.repository_name != NEW.repository_name OR
OLD.created != NEW.created OR OLD.origin != NEW.origin OR
OLD.origin_value != NEW.origin_value)
BEGIN
SELECT RAISE(FAIL, 'File is immutable; cannot update except description.');
END
""")
self.db.commit()
def _copy_file_to_repository(self,src):
"""Copy a file src into the MiniLIMS repository.
src can be a fairly arbitrary path, either from the CWD, or
using .. and other such shortcuts. This function should only
be called from SQLite3, not Python.
"""
filename = unique_filename_in(self.file_path)
shutil.copyfile(src,os.path.abspath(os.path.join(self.file_path,filename)))
return filename
def _delete_repository_file(self,filename):
"""Delete a file from the MiniLIMS repository.
This function should only be called from SQLite3, not from Python.
"""
os.remove(os.path.join(self.file_path,filename))
return None
def _export_file_from_repository(self,fileid,dst):
"""Write a file with id fileid to the directory dst.
This function should only be called from SQLite3, not Python.
"""
if os.path.isdir(dst):
filename = unique_filename_in(dst)
else:
filename = ""
try:
[repository_filename] = [x for (x,) in self.db.execute("select repository_name from file where id=?",
(fileid,))]
shutil.copyfile(os.path.abspath(os.path.join(self.file_path,repository_filename)),
os.path.abspath(os.path.join(dst, filename)))
return filename
except ValueError, v:
return None
    def write(self, ex, description = "", exception_string=None):
        """Write an execution to the MiniLIMS and return its id.

        The overall Execution object is recorded in the execution
        table. Each program in it is entered as a row in the program
        table, including their stdout, stderr, etc. Each argument to
        the program gets a row in the argument table. All files which
        were used in the execution from the MiniLIMS repository are
        added to the execution_use table. Any files which were added
        to the repository are copied to the repository and entered in
        the file table.

        :param ex: the finished Execution to record (its ``files`` list
            is consumed — emptied — by this call).
        :param description: free-text (or str(dict)) label for the execution.
        :param exception_string: stringified exception if the execution
            failed, else None.
        :returns: the integer id of the new execution row.
        """
        # NOTE(review): the next line is a no-op attribute access, apparently
        # left over from a text_factory experiment.
        sqlite3.OptimizedUnicode #self.db.text_factory = 'unicode'
        description = str(description)
        self.db.execute("""insert into execution
                           (started_at, finished_at, working_directory,
                           description, exception)
                           values (?,?,?,?,?)""",
                        (ex.started_at, ex.finished_at, ex.working_directory,
                         description, exception_string))
        exid = self.db.execute("select last_insert_rowid()").fetchone()[0]
        # Write all the programs.
        # If a program failed to start, ex.programs holds None in its place;
        # substitute a stub object so the inserts below still work.
        class failed_program(object):
            def __init__(self, pid):
                self.stdout = None
                self.stderr = exception_string
                self.pid = pid
                self.return_code = exception_string
                self.arguments = []
        for i,p in enumerate(ex.programs):
            if p is None:
                p = failed_program(i)
            if p.stdout == None:
                stdout_value = ""
            else:
                stdout_value = "".join(p.stdout)
            if p.stderr == None:
                stderr_value = ""
            else:
                stderr_value = "".join(p.stderr)
            # NOTE(review): .decode('utf-8') assumes Python 2 byte strings in
            # stdout/stderr; under Python 3 str has no .decode — confirm the
            # module is still Python 2 only before porting.
            self.db.execute("""insert into program(pos,execution,pid,
                               return_code,stdout,stderr)
                               values (?,?,?,?,?,?)""",
                            (i, exid, p.pid, p.return_code,
                             stdout_value.decode('utf-8'), stderr_value.decode('utf-8')))
            for j,a in enumerate(p.arguments):
                self.db.execute("""insert into argument(pos,program,execution,
                                   argument) values (?,?,?,?)""",
                                (j,i,exid,a))
        # Write the files.
        # Files are inserted in dependency order: a file is picked up only
        # once the file it associates to by name (field 3 of its tuple) has
        # been inserted in an earlier round. The (None,) sentinel makes the
        # first round select files with no filename-association target.
        # Note that 'remaining' aliases ex.files, so ex.files shrinks too.
        fileids = {}
        removed = [(None,)]
        remaining = ex.files
        while remaining != []:
            these = [k for k in ex.files if k[3] in [x[0] for x in removed]]
            for (filename,description,associate_to_id,associate_to_filename,
                 template,alias) in these:
                fileids[filename] = self._insert_file(ex, exid, filename, description)
                if alias != None:
                    self.add_alias(fileids[filename], alias)
                if associate_to_id != None or associate_to_filename != None:
                    if template == None:
                        raise ValueError("Must provide a template for an association.")
                    elif template == "%s":
                        raise ValueError("Template must be more than just %s")
                    # NOTE(review): str.index raises ValueError rather than
                    # returning -1, so this branch can never fire; a template
                    # without '%s' escapes as an uncaught ValueError instead.
                    elif template.index("%s") == -1:
                        raise ValueError("Template must contain %s")
                    elif associate_to_id != None:
                        self._associate_file(fileids[filename], associate_to_id, template)
                    else:
                        self._associate_file(fileids[filename], fileids[associate_to_filename], template)
            [remaining.remove(t) for t in these]
            removed.extend(these)
        # Record which pre-existing repository files the execution read.
        for used_file in set(ex.used_files):
            self.db.execute("""insert into execution_use(execution,file)
                               values (?,?)""", (exid,used_file))
        self.db.commit()
        return exid
def _insert_file(self, ex, exid, filename, description):
self.db.execute("""insert into file(external_name,repository_name,
description,origin,origin_value)
values (?,importfile(?),?,?,?)""",
(filename, os.path.abspath(os.path.join(ex.working_directory,filename)),
description, 'execution', exid))
return self.db.execute("""select last_insert_rowid()""").fetchone()[0]
    def _rename_in_repository(self, fileid, new_repository_name):
        """Rename file *fileid*'s repository file to *new_repository_name*.

        The ``prevent_repository_name_change`` trigger forbids exactly this
        update, so the trigger is dropped, the row updated, and the trigger
        recreated before the file is moved on disk.
        NOTE(review): this sequence is not protected by an explicit
        transaction; a failure between the UPDATE and the move leaves the
        database and the directory out of sync — confirm callers commit or
        roll back appropriately.
        """
        old_target_name = self.db.execute("""select repository_name from file
                                             where id=?""", (fileid,)).fetchone()[0]
        self.db.execute("""drop trigger if exists prevent_repository_name_change""")
        self.db.execute("""update file set repository_name=? where id=?""",
                        (new_repository_name, fileid))
        # Restore the guard trigger exactly as initialize_database created it.
        self.db.execute("""CREATE TRIGGER prevent_repository_name_change
                           BEFORE UPDATE ON file
                           FOR EACH ROW WHEN (OLD.repository_name != NEW.repository_name) BEGIN
                               SELECT RAISE(FAIL, 'Cannot change the repository name of a file.');
                           END""")
        shutil.move(os.path.join(self.file_path, old_target_name),
                    os.path.join(self.file_path, new_repository_name))
def _associate_file(self, thisid, targetid, template):
# Make the filename in the repository match this association
raw_name = self.db.execute("""select repository_name from file where id=?""",
(targetid,)).fetchone()[0]
new_target_name = template % raw_name
self._rename_in_repository(thisid, new_target_name)
self.associate_file(thisid, targetid, template)
def search_files(self, with_text=None, with_description=None, older_than=None, newer_than=None, source=None):
"""Find files matching given criteria in the LIMS.
Finds files which satisfy all the criteria which are not None.
The criteria are:
* *with_text*: The file's external_name or description
contains *with_text*
* *with_description*: The file's description contains
*with_description*
* *older_than*: The file's created time is earlier than
*older_than*. This should be of the form "YYYY-MM-DD
HH:MM:SS". Final fields can be omitted, so "YYYY" and
"YYYY-MM-DD HH:MM" are also valid date formats.
* *newer_than*: The file's created time is later than
*newer_then*, using the same format as *older_than*.
* *source*: Where the file came from. Can be one of
``'execution'``, ``'copy'``, ``'import'``,
``('execution',exid)``, or ``('copy',srcid)``, where
``exid`` is the numeric ID of the execution that created
this file, and ``srcid`` is the file ID of the file which
was copied to create this one.
"""
desc_request = "(id is not null)";
if isinstance(with_description,dict):
sql = """select description,id from file where length(description)>1 """
descriptions = self.db.execute(sql).fetchall()
from_db=[]; descriptions_to_keep=[]; desc_request="("
for d in descriptions:
if d[0][0] == '{':
try: from_db.append( (eval(d[0]),d[1]) )
except SyntaxError: pass
for d in from_db:
if all([d[0].get(k)==with_description[k] for k in with_description.keys()]):
descriptions_to_keep.append(d[1])
for d in descriptions_to_keep:
desc_request += "id = "+str(d)+" or "
desc_request += "id is null)"
with_description=None
if not(isinstance(source, tuple)):
source = (source,None)
source = source != None and source or (None,None)
with_text = with_text != None and '%' + with_text + '%' or None
sql = """select id from file where""" + desc_request + """
and ((external_name like ? or ? is null) or (description like ? or ? is null))
and (description like ? or ? is null)
and (created >= ? or ? is null)
and (created <= ? or ? is null)
and (origin = ? or ? is null)
and (origin_value = ? or ? is null)"""
matching_files = self.db.execute(sql,
(with_text, with_text, with_text, with_text,
with_description, with_description,
newer_than, newer_than,
older_than, older_than,
source[0], source[0],
source[1], source[1]))
return [x for (x,) in matching_files]
def search_executions(self, with_text=None, with_description=None, started_before=None,
started_after=None, ended_before=None, ended_after=None, fails=None):
"""Find executions matching the given criteria.
Returns a list of execution ids of executions which satisfy
all the criteria which are not None. The criteria are:
* *with_text*: The execution's description or one of the
program arguments in the execution contains *with_text*.
* *with_description*: The execution's description contains
*with_description*.
* *started_before*: The execution started running before
*start_before*. This should be of the form "YYYY-MM-DD
HH:MM:SS". Final fields can be omitted, so "YYYY" and
"YYYY-MM-DD HH:MM" are also valid date formats.
* *started_after*: The execution started running after
*started_after*. The format is identical to
*started_before*.
* *ended_before*: The execution finished running before
*ended_before*. The format is the same as for
*started_before*.
* *ended_after*: The execution finished running after
*ended_after*. The format is the same as for
*started_before*.
* *fails*: If 'False', returns only executions that didn't
encounter any error, i.e. execution.exception not null.
If 'True', returns only executions with errors.
Warning: any try/except block inside an execution may
cause execution.exception not to be null without making
fail the script itself.
"""
desc_request = "(id is not null)"
if isinstance(with_description,dict):
sql = """select description,id from execution where length(description)>1"""
descriptions = self.db.execute(sql).fetchall()
from_db=[]; descriptions_to_keep=[]; desc_request="("
for d in descriptions:
if d[0][0] == '{':
try: from_db.append( (eval(d[0]),d[1]) )
except SyntaxError: pass
for d in from_db:
if all([d[0].get(k)==with_description[k] for k in with_description.keys()]):
descriptions_to_keep.append(d[1])
for d in descriptions_to_keep:
desc_request += "id = "+str(d)+" or "
desc_request += "id is null)"
with_description=None
if fails == False:
desc_request += " and (exception is null) "
elif fails == True:
desc_request += " and (exception is not null) "
with_text = with_text != None and '%'+with_text+'%' or None
sql = """select id from execution where""" + desc_request + """
and (started_at <= ? or ? is null)
and (started_at >= ? or ? is null)
and (finished_at <= ? or ? is null)
and (finished_at >= ? or ? is null)
and (description like ? or ? is null)
and ((working_directory like ? or ? is null) or (description like ? or ? is null))
"""
matching_executions = [x for (x,) in self.db.execute(sql,
(started_before, started_before,
started_after, started_after,
ended_before, ended_before,
ended_after, ended_after,
with_description, with_description,
with_text, with_text,
with_text, with_text))]
if with_text != None:
sql = """select distinct execution from argument where argument like ?"""
matching_programs = [x for (x,) in self.db.execute(sql, (with_text,))]
else:
matching_programs = []
return sorted(list(set(matching_executions+matching_programs)))[::-1]
def browse_files(self, with_text=None, with_description=None, older_than=None, newer_than=None, source=None):
"""
Prints and returns a set of tuples (ID, description, created at),
one for each file corresponding to the request.
See documentation for search_files().
"""
if not(isinstance(source, tuple)):
source = (source,None)
source = source != None and source or (None,None)
with_text = with_text != None and '%'+with_text+'%' or None
sql = """select id,description,created from file where
((external_name like ? or ? is null) or (description like ? or ? is null))
and (description like ? or ? is null)
and (created >= ? or ? is null)
and (created <= ? or ? is null)
and (origin = ? or ? is null)
and (origin_value = ? or ? is null)
"""
matching_files = self.db.execute(sql,
(with_text, with_text, with_text, with_text,
with_description, with_description,
newer_than, newer_than,
older_than, older_than,
source[0], source[0],
source[1], source[1]))
out = "ID \t Description \t Created at \n"
for m in matching_files:
out += str(m[0])+"\t"+ m[1]+"\t"+m[2]+"\n"
print out
return matching_files
def browse_executions(self, with_text=None, with_description=None, started_before=None,
started_after=None, ended_before=None, ended_after=None):
"""
Prints and returns a set of tuples (ID, description, started at, finished at),
one for each execution corresponding to the request.
See documentation for search_executions().
"""
with_text = with_text != None and '%'+with_text+'%' or None
sql = """select id,description,started_at,finished_at from execution where
(started_at <= ? or ? is null) and
(started_at >= ? or ? is null) and
(finished_at <= ? or ? is null) and
(finished_at >= ? or ? is null) and
(description like ? or ? is null) and
((working_directory like ? or ? is null) or (description like ? or ? is null))
"""
matching_executions = [x for x in self.db.execute(sql,
(started_before, started_before,
started_after, started_after,
ended_before, ended_before,
ended_after, ended_after,
with_description, with_description,
with_text, with_text, with_text, with_text))]
if with_text != None:
sql = """select distinct execution from argument where argument like ?"""
matching_programs = [x for (x,) in self.db.execute(sql, (with_text,))]
else:
matching_programs = []
matching_executions = matching_executions + matching_programs
out = "ID \t Description \t Started at \t Finished at \n"
for m in matching_executions:
out += str(m[0])+"\t"+ m[1]+"\t"+ time.ctime(m[2])+"\t"+ time.ctime(m[3])+"\n"
print out
return matching_executions
def last_id(self):
"""Return the id of the last thing written to the repository."""
return self.db.execute("select last_insert_rowid()").fetchone()[0]
    def fetch_file(self, id_or_alias):
        """Return a dictionary describing the given file.

        Keys: 'external_name', 'repository_name', 'created',
        'description', 'origin' (a tuple ('copy'|'execution', value) or
        the string 'import'), 'aliases', 'associations', 'associated_to',
        and 'immutable'.

        :raises ValueError: if no such file exists in the MiniLIMS.
        """
        fileid = self.resolve_alias(id_or_alias)
        fields = self.db.execute("""select external_name, repository_name,
                                    created, description, origin, origin_value
                                    from file where id=?""",
                                 (fileid,)).fetchone()
        if fields == None:
            raise ValueError("No such file " + str(id_or_alias) + " in MiniLIMS.")
        else:
            [external_name, repository_name, created, description,
             origin_type, origin_value] = fields
            # Normalize the (origin, origin_value) column pair into the
            # documented shape.
            # NOTE(review): any other origin_type leaves 'origin' unbound and
            # the return below raises NameError — confirm the column is
            # constrained to these three values.
            if origin_type == 'copy':
                origin = ('copy',origin_value)
            elif origin_type == 'execution':
                origin = ('execution',origin_value)
            elif origin_type == 'import':
                origin = 'import'
            aliases = [a for (a,) in
                       self.db.execute("select alias from file_alias where file=?",
                                       (fileid,))]
            associations = self.db.execute("""select fileid,template from file_association where
                                              associated_to=?""", (fileid,)).fetchall()
            associated_to = self.db.execute("""select associated_to,template from file_association
                                              where fileid=?""", (fileid,)).fetchall()
            # Immutability is computed by the file_immutability view created
            # in initialize_database.
            immutable = self.db.execute("select immutable from file_immutability where id=?",
                                        (fileid,)).fetchone()[0]
            return {'external_name': external_name,
                    'repository_name': repository_name,
                    'created': created,
                    'description': description,
                    'origin': origin,
                    'aliases': aliases,
                    'associations': associations,
                    'associated_to': associated_to,
                    'immutable': immutable == 1}
    def fetch_execution(self, exid):
        """Return a dictionary of all the data corresponding to the given execution id.

        Keys: 'started_at', 'finished_at', 'working_directory',
        'description', 'exception_string', 'programs' (a list of dicts as
        built by fetch_program below), 'added_files', 'used_files', and
        'immutable'.

        :raises ValueError: if no execution with id *exid* exists.
        """
        def fetch_program(exid, progid):
            # Load one program row plus its ordered argument list.
            # NOTE(review): one query per program — fine for typical
            # execution sizes, O(n) round trips for large ones.
            fields = self.db.execute("""select pid,return_code,stdout,stderr
                                        from program where execution=? and pos=?""",
                                     (exid, progid)).fetchone()
            if fields == None:
                raise ValueError("No such program: execution %d, program %d" % (exid, progid))
            else:
                [pid, return_code, stdout, stderr] = fields
                arguments = [a for (a,) in self.db.execute("""select argument from argument
                                                              where execution=? and program=?
                                                              order by pos asc""", (exid,progid))]
                return {'pid': pid,
                        'return_code': return_code,
                        'stdout': stdout,
                        'stderr': stderr,
                        'arguments': arguments}
        exfields = self.db.execute("""select started_at, finished_at, working_directory,
                                      description, exception from execution
                                      where id=?""", (exid,)).fetchone()
        if exfields == None:
            raise ValueError("No such execution with id %d" % (exid,))
        else:
            (started_at,finished_at,working_directory,
             description, exception) = exfields
        progids = [a for (a,) in self.db.execute("""select pos from program where execution=?
                                                    order by pos asc""", (exid,))]
        progs = [fetch_program(exid,i) for i in progids]
        # Files the execution created vs. files it read from the repository.
        added_files = [a for (a,) in self.db.execute("""select id from file where
                                                        origin='execution' and origin_value=?""",
                                                     (exid,))]
        used_files = [a for (a,) in self.db.execute("""select file from execution_use
                                                       where execution=?""", (exid,))]
        # Computed by the execution_immutability view from initialize_database.
        immutability = self.db.execute("""select immutable from execution_immutability
                                          where id=?""", (exid,)).fetchone()[0]
        return {'started_at': started_at,
                'finished_at': finished_at,
                'working_directory': working_directory,
                'description': description,
                'exception_string': exception,
                'programs': progs,
                'added_files': added_files,
                'used_files': used_files,
                'immutable': immutability == 1}
def copy_file(self, file_or_alias):
"""Copy the given file in the MiniLIMS repository.
A copy of the file corresponding to the given fileid is made
in the MiniLIMS repository, and the file id of the copy is
returned. This is most useful to create a mutable copy of an
immutable file.
"""
fileid = self.resolve_alias(file_or_alias)
try:
sql = """select external_name,repository_name,description
from file where id = ?"""
[(external_name,
repository_name,
description)] = [x for x in self.db.execute(sql, (fileid, ))]
new_repository_name = unique_filename_in(self.file_path)
sql = """insert into file(external_name,repository_name,
origin,origin_value) values (?,?,?,?)"""
[x for x in self.db.execute(sql, (external_name,
new_repository_name,
'copy', fileid))]
[new_id] = [x for (x,) in
self.db.execute("select last_insert_rowid()")]
shutil.copyfile(os.path.join(self.file_path, repository_name),
os.path.join(self.file_path, new_repository_name))
self.db.commit()
return new_id
except ValueError, v:
raise ValueError("No such file id " + str(fileid))
def delete_file(self, file_or_alias):
"""Delete a file from the repository."""
fileid = self.resolve_alias(file_or_alias)
try:
try:
for (f,t) in self.associated_files_of(fileid):
self.delete_file(f)
except ValueError, v:
pass
sql = "select repository_name from file where id = ?"
[repository_name] = [x for (x,) in self.db.execute(sql, (fileid,))]
sql = "delete from file where id = ?"
[x for (x,) in self.db.execute(sql, (fileid, ))]
os.remove(os.path.join(self.file_path, repository_name))
sql = "delete from file_alias where file=?"
self.db.execute(sql, (fileid,)).fetchone()
self.db.commit()
except ValueError:
raise ValueError("No such file id " + str(fileid))
def delete_execution(self, execution_id):
"""Delete an execution from the MiniLIMS repository."""
try:
files = self.search_files(source=('execution',execution_id))
for i in files:
try:
self.delete_file(i)
except ValueError, v:
pass
self.db.execute("delete from argument where execution = ?",
(execution_id,))
self.db.execute("delete from program where execution = ?",
(execution_id,))
self.db.execute("delete from execution where id = ?",
(execution_id,))
self.db.execute("delete from execution_use where execution=?",
(execution_id,))
self.db.commit()
except ValueError, v:
raise ValueError("No such execution id " + str(execution_id) + ": " + v.message)
def import_file(self, src, description=""):
"""Add an external file *src* to the MiniLIMS repository.
*src* should be the path to the file to be added.
*description* is an optional string or dictionary that will be attached to
the file in the repository. ``import_file`` returns the file id
in the repository of the newly imported file.
"""
if isinstance(description,dict):
description = str(description)
self.db.execute("""insert into file(external_name,repository_name,
description,origin,origin_value)
values (?,importfile(?),?,?,?)""",
(os.path.basename(src),os.path.abspath(src),
description,'import',None))
self.db.commit()
return [x for (x,) in
self.db.execute("""select last_insert_rowid()""")][0]
def export_file(self, file_or_alias, dst, with_associated=False):
"""Write *file_or_alias* from the MiniLIMS repository to *dst*.
*dst* can be either a directory, in which case the file will
have its repository name in the new directory, or can specify
a filename, in which case the file will be copied to that
filename.
Associated files will also be copied if *with_associated=True*.
"""
src = self.path_to_file(file_or_alias)
shutil.copy(src, dst)
if with_associated:
if os.path.isdir(dst):
dst = os.path.join(dst, self.fetch_file(file_or_alias)['repository_name'])
for association in self.fetch_file(file_or_alias)['associations']: #association = (id,template)
fileid = association[0]
template = association[1][2:] #removes %s
dst = dst + template
shutil.copy(src,dst)
def path_to_file(self, file_or_alias):
"""Return the full path to a file in the repository.
It is often useful to be able to read a file in the repository
without actually copying it. If you are not planning to write
to it, this presents no problem.
"""
fileid = self.resolve_alias(file_or_alias)
filename = [x for (x,) in
self.db.execute("""select repository_name
from file where id = ?""",
(fileid, ))][0]
return(os.path.join(self.file_path,filename))
def resolve_alias(self, alias):
"""Resolve an alias to an integer file id.
If an integer is passed to ``resolve_alias``, it is returned as is,
so this method can be used without worry any time any alias
might have to be resolved.
"""
if isinstance(alias, int):
x = self.db.execute("select id from file where id=?", (alias,)).fetchall()
if x != [] and x != None:
return alias
else:
raise ValueError("No such file with id %d" % alias)
elif isinstance(alias, str):
x = self.db.execute("select file from file_alias where alias=?", (alias,)).fetchone()
if x == None:
raise ValueError("No such file alias: " + alias)
else:
return x[0]
def add_alias(self, fileid, alias):
"""Make the string *alias* an alias for *fileid* in the repository.
An alias can be used in place of an integer file ID in
all methods that take a file ID.
"""
self.db.execute("""insert into file_alias(alias,file) values (?,?)""",
(alias, self.resolve_alias(fileid)))
self.db.commit()
def delete_alias(self, alias):
"""Delete the alias *alias* from the repository.
The file itself is untouched. This only affects the alias.
"""
self.db.execute("""delete from file_alias where alias = ?""", (alias,))
self.db.commit()
def associated_files_of(self, file_or_alias):
"""Find all files associated to *file_or_alias*.
Return a list of ``(fileid, template)`` of all files associated
to *file_or_alias*.
"""
f = self.resolve_alias(file_or_alias)
return self.db.execute("""select fileid,template from file_association where associated_to = ?""", (f,)).fetchall()
def associate_file(self, file_or_alias, associate_to, template):
"""Add a file association from *file_or_alias* to *associate_to*.
When the file *associate_to* is used in an execution,
*file_or_alias* is also used, and named according to *template*.
*template* should be a string containing ``%s``, which will be
replaced with the name *associate_to* is copied to. So if
*associate_to* is copied to *X* in the working directory, and
the template is ``"%s.idx"``, then `file_or_alias` is copied
to *X* ``.idx``.
"""
src = self.resolve_alias(file_or_alias)
dst = self.resolve_alias(associate_to)
if template.find("%s") == -1:
raise ValueError("Template of a file association must contain exactly one %s.")
else:
self.db.execute("""insert into file_association(fileid,associated_to,template) values (?,?,?)""", (src, dst, template))
self.db.commit()
def delete_file_association(self, file_or_alias, associated_to):
"""Remove the file association from *file_or_alias* to *associated_to*.
Both fields can be either an integer or an alias string.
"""
src = self.resolve_alias(file_or_alias)
dst = self.resolve_alias(associated_to)
self.db.execute("""delete from file_association where fileid=? and associated_to=?""", (src,dst))
self.db.commit()
################################################################################
def task(f):
    """Wrap the function *f* in an execution.

    The @task decorator wraps a function in an execution and handles
    producing a sensible return value. The function must expect its
    first argument to be an execution. The function produced by @task
    instead expects a MiniLIMS (or ``None``) in its place.

    You can also pass a ``description`` keyword argument, which will
    be used to set the description of the execution. For example,::

        @task
        def f(ex, filename):
            touch(ex, filename)
            ex.add(filename, "New file")
            return {'created': filename}

    will be wrapped into a function that is called as::

        f(M, "boris", description="An execution")

    where ``M`` is a MiniLIMS. It could also be called with ``None``
    in place of ``M``, which is the same as creating an execution
    without attaching it to a MiniLIMS (in this case it will fail,
    since ``f`` tries to add a file to the MiniLIMS).

    The return value is a dictionary with three keys:

    * ``value`` is the value returned by the wrapped function.
    * ``files`` is a dictionary of all files the execution added to
      the MiniLIMS, with their descriptions as keys and their ids as
      values.
    * ``execution`` is the execution id.
    """
    def wrapper(lims, *args, **kwargs):
        # kwargs.pop with a default replaces the Python 2-only
        # 'except KeyError, k' handler the original used here.
        description = kwargs.pop('description', "")
        # Run the wrapped function inside an execution.
        with execution(lims, description=description) as ex:
            v = f(ex, *args, **kwargs)
        # Pull together the return value.
        ex_id = ex.id
        if isinstance(lims, MiniLIMS):
            file_ids = lims.search_files(source=('execution', ex_id))
            files = dict([(lims.fetch_file(i)['description'], i) for i in file_ids])
        else:
            files = {}
        return {'value': v, 'files': files, 'execution': ex_id}
    wrapper.__doc__ = f.__doc__
    wrapper.__name__ = f.__name__
    return wrapper
|
bbcf/bbcflib
|
bein/__init__.py
|
Python
|
gpl-3.0
| 70,402
|
[
"Galaxy"
] |
9043f7dc0cb6fa89ad4003d66c7c56b03145ee3f9557d2ecd526e7166cb4372b
|
# -*- coding: utf-8 -*-
import collections
import gzip
import itertools
import io
import logging
import math
import re
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
import seaborn as sns
from .amino_acids import _SUBSTITUTION_MATRICES
from .data import path_blast, path_kalign, uniref100, get_network_parameters
from .utils import load_fasta_iter
############
# Features #
############
# UniRef identity levels ('100' and '90') the alignment features use.
_DBS = ['100', '90']
# Feature name fragments, grouped by the pipeline stage that computes them.
# The blast/kalign names encode: db digit, sequence subset, measure code
# and an optional '_w' suffix for the weighted variant.
_FEATURES = {
    'basic': ['hkd', 'pos'] +
    ['blosum50', 'blosum62', 'blosum80', 'miyata', 'pam60'] +
    [''.join(l) for l in itertools.product(
        ['hwo', 'hww', 'vol'], ['_d', '_r'])],
    'topology': ['top_' + s for s in [
        'betweenness', 'cc', 'closeness', 'degree',
        'degree_centrality', 'eigenvector']],
    'blast': [''.join(l) for l in itertools.product(
        ['b'],
        [d[0] for d in _DBS],
        ['_all', '_eva', '_hum', '_nhu'],
        ['_nwt', '_nmt', '_naa', '_nal', '_rwt', '_rmt', '_pwm'],
        ['', '_w'])],
    'kalign': [''.join(l) for l in itertools.product(
        ['k'],
        [d[0] for d in _DBS],
        ['_all', '_hum', '_nhu'],
        ['_nwt', '_nmt', '_naa', '_nal', '_rwt', '_rmt', '_pwm',
         '_dh', '_dp', '_de', '_dv', '_da', '_db'],
        ['', '_w'])]
}
# Prepend 'feature_'
_FEATURES = {k: ['feature_' + x for x in v] for k, v in _FEATURES.items()}
# Flat tuple with every known feature column name.
FEATURES = tuple(itertools.chain(*_FEATURES.values()))
# BLAST features per database, selected by the first db digit ('1'/'9').
_BLAST_FEATURES = {
    d: set([f for f in _FEATURES['blast'] if f.startswith('feature_b' + d[0])])
    for d in _DBS
}
# Kalign features per database, selected the same way.
_KALIGN_FEATURES = {
    d: set([f for f in _FEATURES['kalign']
            if f.startswith('feature_k' + d[0])])
    for d in _DBS
}
# Feature subset used by the PMut predictor.
PMUT_FEATURES = sorted([
    'feature_k1_all_nwt_w',
    'feature_b1_eva_rwt',
    'feature_k1_all_nal',
    'feature_k9_all_nwt_w',
    'feature_b1_eva_naa',
    'feature_k9_hum_naa_w',
    'feature_b9_eva_naa',
    'feature_k9_all_naa_w',
    'feature_k1_all_pwm_w',
    'feature_b1_eva_pwm_w',
    'feature_k1_hum_naa',
    'feature_miyata',
])
#################
# Feature names #
#################
# TODO: update feature names
# Human readable labels for a subset of the feature identifiers.
_FEATURE_NAMES = {
    'feature_blosum62': 'BLOSUM 62',
    'feature_hwo_d': 'Hidrophobicity difference',
    'feature_top_degree': 'Topology Degree',
    'feature_b1_eva_nwt': 'BLAST UniRef100 # wt',
    'feature_b1_eva_nmt_w': 'BLAST UniRef100 % mt',
    'feature_b1_eva_naa': 'BLAST UniRef100 # aas',
    'feature_b1_eva_pwm': 'BLAST UniRef100 PSSM',
    'feature_b1_eva_pwm_w': 'BLAST UniRef100 weighted PSSM',
    'feature_k1_all_nwt': 'Kalign UniRef100 # wt',
    'feature_k1_all_nal': 'Kalign UniRef100 # align',
    'feature_k1_all_pwm_w': 'Kalign UniRef100 weighted PSSM',
    'feature_k1_hum_nmt_w': 'Kalign Human UniRef100 weighted # mt',
    'feature_k1_hum_nal': 'Kalign Human UniRef100 # align',
    'feature_k9_all_nwt_w': 'Kalign UniRef90 weighted # wt',
    'feature_k9_hum_naa': 'Kalign Human UniRef90',
}
def feature_name(feature):
    """Return a human readable name of the feature.

    Falls back to the raw identifier when no label is registered.
    """
    # single dict lookup with default instead of membership test + lookup
    return _FEATURE_NAMES.get(feature, feature)
########################
# Features computation #
########################
def compute_features(variants, features=None):
    """Compute the requested feature columns of *variants* in place.

    When *features* is None every known feature (``FEATURES``) is
    computed.
    """
    requested = FEATURES if features is None else features
    # Each stage fills in the subset of columns it is responsible for.
    for stage in (_basic_features, _topology_features,
                  _blast_features, _kalign_features):
        stage(variants, requested)
##################
# Basic features #
##################
# Average volume of buried residues
# Source: Amino Acid Volumes. -C. Chothia, Nature 254:304-308(1975)
_AA_Vol = {
    'A': 92., 'C': 106., 'D': 125., 'E': 155., 'F': 203., 'G': 66.,
    'H': 167., 'I': 169., 'K': 171., 'L': 168., 'M': 171., 'N': 125.,
    'P': 129., 'Q': 161., 'R': 202., 'S': 99., 'T': 122., 'V': 142.,
    'W': 238., 'Y': 204.,
    'U': 0.0, 'X': 0.0  # Invented
}
# Hydrophobicity
# Source: Wimley and White, Nat Struct Biol 3:842 (1996),
# http://www.cgl.ucsf.edu/chimera/docs/UsersGuide/midas/hydrophob.html
_AA_Hww = {
    "D": -1.23, "E": -2.02, "N": -0.42, "Q": -0.58, "K": -0.99, "R": -0.81,
    "H": -0.96, "G": -0.01, "P": -0.45, "S": -0.13, "T": -0.14, "C": 0.24,
    "M": 0.23, "A": -0.17, "V": -0.07, "I": 0.31, "L": 0.56, "F": 1.13,
    "W": 1.85, "Y": 0.94,
    'U': 0.0, 'X': 0.0  # Invented
}
# Free energies of transfer between Octanol and water
# Source: Octanol and water. By Fauchere & Pliska.(1989)
_AA_Hwo = {
    "A": 0.42, "C": 1.34, "D": -1.05, "E": -0.87, "F": 2.44, "G": 0.00,
    "H": 0.18, "I": 2.46, "K": -1.35, "L": 2.32, "M": 1.68, "N": -0.82,
    "P": 0.98, "Q": -0.30, "R": -1.37, "S": -0.05, "T": 0.35, "V": 1.66,
    "W": 3.07, "Y": 1.31,
    'U': 0.0, 'X': 0.0  # Invented
}
# Hydrophaty Index
# Source: A simple method for displaying the hydropathic character of a
# protein. Kyte and Doolittle
# NOTE(review): the original dict also contained "X": -0.14, silently
# shadowed by the later 'X': 0.0 entry; the duplicate key was removed
# without changing the effective values.
# NOTE(review): "Y": 6.3 and "T": -1.3 disagree with the published
# Kyte-Doolittle scale (Y: -1.3, T: -0.7) -- confirm before changing,
# since trained models may depend on these values.
_AA_Hkd = {
    "F": 2.8, "I": 4.5, "W": -0.9, "L": 3.8, "V": 4.2, "M": 1.9,
    "Y": 6.3, "C": 2.5, "A": 1.8, "T": -1.3, "H": -3.2, "G": -0.4,
    "S": -0.8, "Q": -3.5, "R": -4.5, "K": -3.9, "N": -3.5, "E": -3.5,
    "P": -1.6, "D": -3.5,
    'U': 0.0, 'X': 0.0  # Invented
}
def _diff(variant, values_dict):
    """Property difference: wild-type value minus mutant value."""
    wt_value = values_dict[variant.wt]
    mt_value = values_dict[variant.mt]
    return wt_value - mt_value
def _rdiff(variant, values_dict):
    """Relative property difference, normalised by the wild-type value.

    When the wild-type value is zero the difference is divided by the
    machine epsilon instead (division by "almost zero").
    """
    wt_value = values_dict[variant.wt]
    delta = wt_value - values_dict[variant.mt]
    denominator = wt_value if wt_value != 0 else sys.float_info.epsilon
    return delta / denominator
def _kyte_doolittle(variant, window_size=9):
    """Kyte-Doolittle hydropathy change of the mutation, averaged over a
    window of *window_size* residues centred on the (1-based) mutated
    position, clipped at the sequence ends.
    """
    # Half-open window [start, end) around the 0-based mutation index.
    start = (variant.position-1) - (window_size - 1)//2
    if start < 0:
        start = 0
    end = (variant.position-1) + (window_size + 1)//2
    if end > len(variant.sequence):
        end = len(variant.sequence)
    # NOTE(review): algebraically sum_wt - sum_mt reduces to
    # _AA_Hkd[wt] - _AA_Hkd[mt]; the windowed sums only influence float
    # rounding, while the window length still sets the denominator.
    sum_wt = sum([_AA_Hkd[aa] for aa in variant.sequence[start:end]])
    sum_mt = sum_wt - _AA_Hkd[variant.wt] + _AA_Hkd[variant.mt]
    return float(sum_wt - sum_mt) / (end - start)
def _matrix(variant, matrix):
    """Substitution-matrix score for the variant's (wt, mt) pair.

    The matrices store unordered pairs, so both orientations are tried;
    an unknown pair yields NaN.
    """
    for key in ((variant.wt, variant.mt), (variant.mt, variant.wt)):
        if key in matrix:
            return matrix[key]
    return np.nan
def _basic_feature(variants, feature):
    """Return a Series with the values of one basic *feature*.

    Substitution-matrix features are matched by regex; anything else
    unknown yields None.
    """
    if feature == 'feature_pos':
        return variants['position']
    if feature == 'feature_hkd':
        return variants.apply(_kyte_doolittle, axis=1)
    # Property-difference features: feature -> (callable, property table).
    dispatch = {
        'feature_vol_d': (_diff, _AA_Vol),
        'feature_vol_r': (_rdiff, _AA_Vol),
        'feature_hww_d': (_diff, _AA_Hww),
        'feature_hww_r': (_rdiff, _AA_Hww),
        'feature_hwo_d': (_diff, _AA_Hwo),
        'feature_hwo_r': (_rdiff, _AA_Hwo),
    }
    if feature in dispatch:
        func, table = dispatch[feature]
        return variants.apply(func, args=(table,), axis=1)
    match = re.match('feature_(blosum80|blosum62|blosum50|pam60|miyata)',
                     feature)
    if match:
        matrix = _SUBSTITUTION_MATRICES[match.group(1)]
        return variants.apply(_matrix, args=(matrix,), axis=1)
def _basic_features(variants, features):
    """Fill in the requested basic feature columns of *variants*."""
    logging.info('Computing basic features...')
    wanted = set(features) & set(_FEATURES['basic'])
    for feature in wanted:
        variants[feature] = _basic_feature(variants, feature)
#####################
# Topology features #
#####################
# Lazily loaded network parameters: a mapping indexed by protein_id whose
# values are indexed per-measure, plus a feature -> index mapping.
_TOPOLOGY_FEATURES = {}
_TOPOLOGY_FEATURES_INDEX = {}
def _load_topology_features():
    """Populate the module-level topology tables from the data package."""
    global _TOPOLOGY_FEATURES
    global _TOPOLOGY_FEATURES_INDEX
    _TOPOLOGY_FEATURES, _TOPOLOGY_FEATURES_INDEX = get_network_parameters()
def _topology_features(variants, features):
    """Fill in the requested topology feature columns of *variants*."""
    logging.info('Computing topology features...')
    # The network parameters are (re)loaded on every call.
    _load_topology_features()
    wanted = set(features) & set(_FEATURES['topology'])
    for feature in wanted:
        variants[feature] = _topology_feature(variants, feature)
def _topology_feature(variants, feature):
    """Return a Series with one topology *feature* for every variant."""
    # Position of the measure inside each protein's parameter container.
    index = _TOPOLOGY_FEATURES_INDEX[feature]
    return variants.apply(_topology_variant_feature, args=(index,), axis=1)
def _topology_variant_feature(variant, index):
    """Topology measure *index* for the variant's protein; NaN if the
    protein is absent from the network tables."""
    values = _TOPOLOGY_FEATURES.get(variant.protein_id)
    if values is None:
        return np.nan
    return values[index]
##################
# Blast features #
##################
# E-value inclusion thresholds per database: only alignment columns with
# an e-value below the threshold enter the '_eva' feature subset.
_BLAST_EVALUE = {
    '90': 1e-45,
    '100': 1e-75,
    'test': 1e-75,
}
def _blast_features(variants, features):
    """Fill in the requested BLAST feature columns of *variants*.

    Work is grouped per database and per protein so each BLAST result
    file only needs to be parsed once.
    """
    # Lazy %-args: the message is only rendered when INFO is enabled.
    logging.info('Computing blast features for %d variants...', len(variants))
    for db, blast_features in _BLAST_FEATURES.items():
        db_features = set(features).intersection(blast_features)
        if db_features:
            for protein_id, group in variants.groupby('protein_id'):
                _blast_protein_features(variants, group.index, db_features, db)
def _blast_protein_features(variants, indices, features, db):
    """Compute BLAST features for all variants of a single protein."""
    # All rows in *indices* share the same protein, so the BLAST result
    # is parsed only once per protein.
    protein_id, sequence = variants.loc[indices[0]][['protein_id', 'sequence']]
    blast = _get_blast(protein_id, sequence, db)
    # Compute features for each position
    for position, group in variants.loc[indices].groupby('position'):
        _blast_position_features(variants, group.index, features, db, blast)
def _blast_position_features(variants, indices, features, db, blast):
    """Fill the requested BLAST feature columns for one alignment column.

    All rows in *indices* share the same (protein, position); *blast* is
    the parsed alignment of that protein.
    """
    # Get position column
    position = variants.loc[indices[0]].position
    try:
        aas = blast['aas'][:, position - 1]
        scores = blast['scores'][:, position - 1]
        evalue = blast['evalue'][:, position - 1]
    except IndexError:
        # Position outside the alignment width (narrowed from a bare
        # 'except:'): fall back to empty columns.  np.bytes_ / float
        # replace the np.character / np.float aliases removed from
        # recent NumPy releases.
        aas = np.array([], dtype=np.bytes_)
        scores = np.array([], dtype=float)
        evalue = np.array([], dtype=float)
    significant = evalue < _BLAST_EVALUE[db]
    not_human = np.logical_not(blast['human'])
    # (residues, weights) pair for each feature subset code.
    subsets = {
        'all': (aas, scores),
        'eva': (aas[significant], scores[significant]),
        'hum': (aas[blast['human']], scores[blast['human']]),
        'nhu': (aas[not_human], scores[not_human]),
    }
    for feature in features:
        feature_parts = feature.split('_')
        subset, aas_feature = feature_parts[2:4]
        subset_aas, subset_weights = subsets[subset]
        # A trailing '_w' selects the score-weighted feature variant.
        weights = subset_weights if feature_parts[-1] == 'w' else None
        for i in indices:
            # .at replaces DataFrame.set_value, removed in pandas 1.0.
            variants.at[i, feature] = _aas_feature(
                variants.loc[i], aas_feature, subset_aas, weights)
def _aas_feature(variant, feature, aas, weights):
    """Compute one alignment-column feature for *variant*.

    *aas* is a byte array with the residues aligned to the variant's
    position; *weights* is an optional per-sequence weight vector
    (uniform weights when None).  Unknown feature codes return None.
    """
    if weights is None:
        weights = np.repeat(1., len(aas))
    def n_aln():
        # Total (weighted) number of aligned sequences.
        return weights.sum()
    def wt_aas():
        # Weighted count of sequences carrying the wild-type residue.
        return float(((aas == variant.wt.encode())*weights).sum())
    def mt_aas():
        # Weighted count of sequences carrying the mutant residue.
        return float(((aas == variant.mt.encode())*weights).sum())
    def difference_to_property_mean(property_dict):
        # Column mean of the property minus the mutant's property value.
        property_sum = 0.0
        property_n = 0.0
        for k, v in property_dict.items():
            n = (aas == k.encode()).sum()
            property_n += n
            property_sum += n*v
        if property_n == 0:
            # Empty / all-gap column: avoid the ZeroDivisionError the
            # original code raised here.
            return np.nan
        return property_sum/property_n - property_dict[variant.mt]
    if feature == 'nal':
        return n_aln()
    elif feature == 'naa':
        return float(((aas != b'-')*weights).sum())
    elif feature == 'nwt':
        return wt_aas()
    elif feature == 'nmt':
        return mt_aas()
    elif feature == 'rwt':
        return wt_aas() / len(aas) if len(aas) > 0 else 0.0
    elif feature == 'rmt':
        return mt_aas() / len(aas) if len(aas) > 0 else 0.0
    elif feature == 'pwm':
        return _pssm(variant, wt_aas(), mt_aas())
    elif feature == 'dh':
        return difference_to_property_mean(_AA_Hkd)
    # NOTE(review): the kalign feature list also declares dp/de/dv/da/db
    # codes, which fall through to None here -- confirm this is intended.
def _pssm(variant, wt_x, mt_x):
    """Log-odds (PSSM-style) score of the mutation.

    The mutant/wild-type counts are normalised by the background
    amino-acid frequencies; a non-positive ratio (log of zero) scores
    -10.
    """
    try:
        mt_odds = math.log(mt_x/_AA_Freq[variant.mt])
        wt_odds = math.log(wt_x/_AA_Freq[variant.wt])
    except ValueError:
        return -10.
    return mt_odds - wt_odds
# Composition in percent for the complete Swissprot database
# (only Organism: Human)
_AA_Freq = {
    "A": 7.015, "C": 2.299, "E": 7.094, "D": 4.737, "G": 6.577, "F": 3.655,
    "I": 4.340, "H": 2.631, "K": 5.723, "M": 2.131, "L": 9.962, "N": 3.590,
    "Q": 4.764, "P": 6.306, "S": 8.318, "R": 5.645, "T": 5.356, "W": 1.220,
    "V": 5.973, "Y": 2.666,
    'U': sys.float_info.min, 'X': sys.float_info.min  # Invented
}
###################
# Kalign features #
###################
def _kalign_features(variants, features):
    """Fill in the requested Kalign feature columns of *variants*.

    Work is grouped per database and per protein so each alignment file
    only needs to be parsed once.
    """
    # Lazy %-args: the message is only rendered when INFO is enabled.
    logging.info('Computing kalign features for %d variants...',
                 len(variants))
    for db, kalign_features in _KALIGN_FEATURES.items():
        db_features = set(features).intersection(kalign_features)
        if db_features:
            for protein_id, group in variants.groupby('protein_id'):
                _kalign_protein_features(
                    variants, group.index, db_features, db)
def _kalign_protein_features(variants, indices, features, db):
    """Compute Kalign features for all variants of a single protein."""
    # All rows in *indices* share the same protein, so the alignment is
    # parsed only once per protein.
    protein_id, sequence = variants.loc[indices[0]][['protein_id', 'sequence']]
    kalign = _get_kalign(protein_id, sequence, db)
    # Compute features for each position
    for position, group in variants.loc[indices].groupby('position'):
        _kalign_position_features(variants, group.index, features, db, kalign)
def _kalign_position_features(variants, indices, features, db, kalign):
    """Fill the requested Kalign feature columns for one alignment column.

    All rows in *indices* share the same (protein, position); *kalign*
    is the parsed multiple alignment of that protein.
    """
    # Get position column
    position = variants.loc[indices[0]].position
    aas = kalign['aligned_aas'][:, position - 1]
    human_mask = kalign['human']
    not_human = np.logical_not(human_mask)
    # (residues, weights) pair for each feature subset code.
    subsets = {
        'all': (aas, kalign['similarity']),
        'hum': (aas[human_mask], kalign['similarity'][human_mask]),
        'nhu': (aas[not_human], kalign['similarity'][not_human]),
    }
    for feature in features:
        feature_parts = feature.split('_')
        subset, aas_feature = feature_parts[2:4]
        subset_aas, subset_weights = subsets[subset]
        # A trailing '_w' selects the similarity-weighted variant.
        weights = subset_weights if feature_parts[-1] == 'w' else None
        for i in indices:
            # .at replaces DataFrame.set_value, removed in pandas 1.0.
            variants.at[i, feature] = _aas_feature(
                variants.loc[i], aas_feature, subset_aas, weights)
#############
# Get BLAST #
#############
def _get_blast_alignments(f):
    """Yield one Alignment(title, hsps) per <Hit> in a BLAST XML stream.

    A minimal line-oriented parser over an iterable of byte lines; the
    first <Iteration> block is skipped.  The nested loops deliberately
    share the same file iterator so each level consumes lines until its
    own closing tag.  The equivalent shell extraction:
    """
    # zcat UniRef100_P13807.100.xml.gz
    # | awk '/<Iteration>/{i++}i==2{print}'
    # | grep 'Hit_def\|query-from\|query-to\|hseq\|qseq\|bit-score\|evalue'
    # | sed 's/<Hit_def>/<Hit_def>!!!/g'
    # | awk -F '[<>]' '//{print $3}'
    Alignment = collections.namedtuple('Alignment', 'title, hsps')
    Hsp = collections.namedtuple(
        'Hsp', 'query_start, query_end, sbjct, query, bits, expect')
    re_inside_tags = re.compile(b'>(.*)<')
    def get_field(s):
        # NOTE(review): the parameter 's' is ignored; the closure reads
        # the loop variable 'line' instead.  All call sites pass 'line',
        # so behaviour is unaffected, but the signature is misleading.
        return re_inside_tags.search(line).groups()[0]
    title = ''
    hsps = []
    # Skip everything up to the end of the first <Iteration> block.
    for line in f:
        if line == b'</Iteration>\n':
            break
    for line in f:
        if line == b'<Hit>\n':
            for line in f:
                if line.startswith(b' <Hit_def>'):
                    title = line.split(b'>')[1].split(b'<')[0].decode()
                if line == b' <Hit_hsps>\n':
                    hsps = []
                    for line in f:
                        if line == b' <Hsp>\n':
                            hsp = {}
                            for line in f:
                                if line.startswith(b' <Hsp_query-from>'):
                                    hsp['query_start'] = int(get_field(line))
                                if line.startswith(b' <Hsp_query-to>'):
                                    hsp['query_end'] = int(get_field(line))
                                if line.startswith(b' <Hsp_hseq>'):
                                    hsp['sbjct'] = get_field(line)
                                if line.startswith(b' <Hsp_qseq>'):
                                    hsp['query'] = get_field(line)
                                if line.startswith(b' <Hsp_bit-score>'):
                                    hsp['bits'] = float(get_field(line))
                                if line.startswith(b' <Hsp_evalue>'):
                                    hsp['expect'] = float(get_field(line))
                                if line == b' </Hsp>\n':
                                    break
                            hsps.append(Hsp(**hsp))
                        elif line == b' </Hit_hsps>\n':
                            break
                elif line == b'</Hit>\n':
                    break
            yield Alignment(title, hsps)
def _get_blast(protein_id, sequence, db):
    """Load the BLAST result of *protein_id* against UniRef database *db*.

    Returns a dict of per-row arrays (one row per non-overlapping HSP
    segment): 'aas' aligned residues per query position ('-' where not
    aligned), 'scores' bit scores, 'evalue' e-values, and a boolean
    'human' flag ("Homo sapiens" in the hit title).
    """
    with io.BufferedReader(gzip.open(path_blast(protein_id, db), 'rb')) as f:
        aas = []
        scores = []
        evalue = []
        human = []
        for alignment in _get_blast_alignments(f):
            # New row covering the whole query sequence.  np.bytes_
            # replaces the np.character alias removed from recent NumPy.
            new_aas = np.array([b'-']*len(sequence), dtype=np.bytes_)
            new_scores = np.full(len(sequence), 0.0)
            new_evalue = np.full(len(sequence), 1.0)
            new_human = 'Homo sapiens' in alignment.title
            for hsp in alignment.hsps:
                start = hsp.query_start - 1
                end = hsp.query_end
                if (hsp.query == hsp.sbjct and
                        start == 0 and end == len(sequence)):
                    # NOTE(review): looks intended to skip the exact
                    # self-match, but the branch currently does nothing
                    # and the HSP is kept -- confirm.
                    pass  # Exact match
                if (new_aas[start:end] != b'-').any():
                    # Overlapping HSP: flush the current row and start
                    # a fresh one.
                    aas.append(new_aas)
                    scores.append(new_scores)
                    evalue.append(new_evalue)
                    human.append(new_human)
                    # New row
                    new_aas = np.array([b'-']*len(sequence),
                                       dtype=np.bytes_)
                    new_scores = np.full(len(sequence), 0.0)
                    new_evalue = np.full(len(sequence), 1.0)
                    new_human = 'Homo sapiens' in alignment.title
                # Subject residues aligned to query positions; columns
                # where the query has a gap are dropped.
                new_aas[start:end] = [
                    chr(s)
                    for q, s in zip(hsp.query, hsp.sbjct)
                    if q != ord(b'-')]
                new_scores[start:end] = hsp.bits  # Use bit score (normalized)
                new_evalue[start:end] = hsp.expect
            # Append row
            aas.append(new_aas)
            scores.append(new_scores)
            evalue.append(new_evalue)
            human.append(new_human)
    return {
        'aas': np.array(aas, dtype=np.bytes_, order='F'),
        'scores': np.array(scores, order='F'),
        'evalue': np.array(evalue, order='F'),
        # plain bool replaces the np.bool alias removed from NumPy 1.24+
        'human': np.array(human, dtype=bool),
    }
##############
# Get Kalign #
##############
def _get_kalign(protein_id, sequence, db):
    """Load the Kalign multiple alignment of *protein_id* for db *db*.

    Returns a dict with 'aligned_aas' (every sequence restricted to the
    query's own residue columns), a per-sequence 'similarity' to the
    query, the sequence 'names' and a boolean 'human' flag.  Raises
    KeyError when the query itself is missing from the file.
    """
    with io.BufferedReader(
            gzip.open(path_kalign(protein_id, db), 'rb')) as f:
        # Find protein in alignment
        self_name = uniref100(protein_id)
        aas = []
        names = []
        titles = []
        for title, seq in load_fasta_iter(f):
            titles.append(title)
            # UniProt-style headers carry the accession in field 2.
            if '|' in title:
                name = title.split('|')[1].split()[0]
            else:
                name = title
            names.append(name)
            if self_name == name:
                # Protein sequence amino acid positions in alignment
                self_seq = seq
                seq_indices = np.where(seq != b'-')[0]
            aas.append(seq)
        if self_name not in names:
            raise KeyError('%s not found in %s.%s.afa.gz' % (
                self_name, self_name, db))
        # Fraction of identical, non-gap columns relative to the query
        # length (assumes load_fasta_iter yields NumPy byte arrays --
        # TODO confirm).
        similarity = np.array([
            float(((self_seq == row) & (row != b'-')).sum()) /
            len(sequence)
            for row in aas
        ])
        return {
            # np.bytes_ replaces the np.character alias removed from
            # recent NumPy releases.
            'aligned_aas': np.array(
                [seq[seq_indices] for seq in aas], dtype=np.bytes_),
            'similarity': similarity,
            'names': names,
            # plain bool replaces the removed np.bool alias
            'human': np.array([
                'Homo sapiens' in title for title in titles],
                dtype=bool)
        }
#########################
# Features distribution #
#########################
def features_distribution(variants, features=None):
    """Plot per-feature histograms of neutral vs disease variants.

    When *features* is None every 'feature_' column of *variants* is
    plotted.  Returns a list of (feature label, matplotlib Figure)
    tuples.
    """
    if features is None:
        features = [c for c in variants.columns if 'feature_' in c]
    sns.set_style("white")
    figures = []
    for feature in features:
        fig = plt.figure()
        # Shared range so both histograms use identical bins.
        value_range = (variants[feature].min(), variants[feature].max())
        # density=True replaces the 'normed' argument removed in
        # matplotlib >= 3.1.
        plt.hist(
            variants[
                np.logical_not(variants.disease)][feature].dropna().values,
            label='Neutral', bins=20, alpha=0.5, color='g', density=True,
            range=value_range)
        plt.hist(variants[variants.disease][feature].dropna().values,
                 label='Disease', bins=20, alpha=0.5, color='r', density=True,
                 range=value_range)
        plt.legend(loc='upper right')
        plt.title(feature_name(feature), y=1.02)
        plt.locator_params(axis='y', nbins=6)
        figures.append((feature_name(feature), fig))
    return figures
|
vlopezferrando/pymut
|
pymut/features.py
|
Python
|
mit
| 22,474
|
[
"BLAST"
] |
253c7aabb40c6c5baa7990a218dbd06c40fa45b54a04d89b69e1f2cd63e9f951
|
from setuptools import setup
from terminus.version import version_str
# Distribution metadata for the terminus package.
setup(
    name='terminus',
    version=version_str,  # single-sourced from terminus/version.py
    author='Brian Cline',
    author_email='brian.cline@gmail.com',
    description=('Updates interfaces file on Debian Linux-based distros'),
    # PyPI long description is taken verbatim from the README.
    long_description=open('README.rst').read(),
    license='MIT',
    keywords='network interface interfaces file debian configuration',
    url='https://github.com/briancline/terminus',
    packages=['terminus'],
    install_requires=[],  # no runtime dependencies
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
)
|
briancline/terminus
|
setup.py
|
Python
|
mit
| 933
|
[
"Brian"
] |
99329be4e69757a95baa39ed0765eccd0aa3372bb0e192d26fbc3ef9e9091977
|
import os
import unittest
import tensorflow as tf
from pymatgen.core import Molecule
from megnet.models import MEGNetModel
from megnet.utils.molecule import get_pmg_mol_from_smiles, pb
CWD = os.path.dirname(os.path.abspath(__file__))
class TestMolecule(unittest.TestCase):
    """Integration tests for the MEGNet molecule utilities."""
    @classmethod
    def setUpClass(cls):
        # Linear C-O-O test molecule and a pre-trained formation-energy
        # model loaded from the repository's mvl_models tree.
        cls.molecule = Molecule(["C", "O", "O"], [[0, 0, 0], [-1, 0, 0], [1, 0, 0]])
        cls.model = MEGNetModel.from_file(os.path.join(CWD, "../../../mvl_models/mp-2019.4.1/formation_energy.hdf5"))
    def test_mol(self):
        # Regression check against the reference model's output
        # (5 decimal places).
        pred = self.model.predict_structure(self.molecule)
        self.assertAlmostEqual(float(pred), 0.39973044, 5)
    @unittest.skipIf(pb is None, "Openbabel is not installed")
    def test_get_pmg_mol_from_smiles(self):
        # Methane from SMILES; requires the optional openbabel binding.
        mol = get_pmg_mol_from_smiles("C")
        self.assertTrue(isinstance(mol, Molecule))
if __name__ == "__main__":
unittest.main()
|
materialsvirtuallab/megnet
|
megnet/utils/tests/test_molecule_util.py
|
Python
|
bsd-3-clause
| 917
|
[
"pymatgen"
] |
43234df2db0514be0dee348d9ca43922fed294ea3ef88d27f4756ec8826ba6b4
|
##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the libsmm library, implemented as an easyblock
"""
import os
import shutil
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root, get_software_version
class EB_libsmm(EasyBlock):
    """
    Support for the CP2K small matrix library
    Notes: - build can take really really long, and no real rebuilding needed for each get_version
           - CP2K can be built without this
    """
    @staticmethod
    def extra_options():
        """Define the custom easyconfig parameters supported by libsmm."""
        # default dimensions
        dd = [1,4,5,6,9,13,16,17,22]
        extra_vars = [
            ('transpose_flavour', [1, "Transpose flavour of routines (default: 1)", CUSTOM]),
            ('max_tiny_dim', [12, "Maximum tiny dimension (default: 12)", CUSTOM]),
            ('dims', [dd, "Generate routines for these matrix dims (default: %s)" % dd, CUSTOM])
        ]
        return EasyBlock.extra_options(extra_vars)
    def configure_step(self):
        """Configure build: change to tools/build_libsmm dir"""
        try:
            dst = 'tools/build_libsmm'
            os.chdir(dst)
            self.log.debug('Change to directory %s' % dst)
        # 'except X as err' works on Python 2.6+ and 3.x, unlike the old
        # 'except X, err' form used previously.
        except OSError as err:
            self.log.exception('Failed to change to directory %s: %s' % (dst, err))
    def build_step(self):
        """Build libsmm
        Possible iterations over precision (single/double) and type (real/complex)
        - also type of transpose matrix
        - all set in the config file
        Make the config.in file (is source afterwards in the build)
        """
        fn = 'config.in'
        cfg_tpl = """# This config file was generated by EasyBuild
# the build script can generate optimized routines packed in a library for
# 1) 'nn' => C=C+MATMUL(A,B)
# 2) 'tn' => C=C+MATMUL(TRANSPOSE(A),B)
# 3) 'nt' => C=C+MATMUL(A,TRANSPOSE(B))
# 4) 'tt' => C=C+MATMUL(TRANPOSE(A),TRANPOSE(B))
#
# select a tranpose_flavor from the list 1 2 3 4
#
transpose_flavor=%(transposeflavour)s
# 1) d => double precision real
# 2) s => single precision real
# 3) z => double precision complex
# 4) c => single precision complex
#
# select a data_type from the list 1 2 3 4
#
data_type=%(datatype)s
# target compiler... this are the options used for building the library.
# They should be aggessive enough to e.g. perform vectorization for the specific CPU (e.g. -ftree-vectorize -march=native),
# and allow some flexibility in reordering floating point expressions (-ffast-math).
# Higher level optimisation (in particular loop nest optimization) should not be used.
#
target_compile="%(targetcompile)s"
# target dgemm link options... these are the options needed to link blas (e.g. -lblas)
# blas is used as a fall back option for sizes not included in the library or in those cases where it is faster
# the same blas library should thus also be used when libsmm is linked.
#
OMP_NUM_THREADS=1
blas_linking="%(LIBBLAS)s"
# matrix dimensions for which optimized routines will be generated.
# since all combinations of M,N,K are being generated the size of the library becomes very large
# if too many sizes are being optimized for. Numbers have to be ascending.
#
dims_small="%(dims)s"
# tiny dimensions are used as primitves and generated in an 'exhaustive' search.
# They should be a sequence from 1 to N,
# where N is a number that is large enough to have good cache performance
# (e.g. for modern SSE cpus 8 to 12)
# Too large (>12?) is not beneficial, but increases the time needed to build the library
# Too small (<8) will lead to a slow library, but the build might proceed quickly
# The minimum number for a successful build is 4
#
dims_tiny="%(tiny_dims)s"
# host compiler... this is used only to compile a few tools needed to build the library.
# The library itself is not compiled this way.
# This compiler needs to be able to deal with some Fortran2003 constructs.
#
host_compile="%(hostcompile)s "
# number of processes to use in parallel for compiling / building and benchmarking the library.
# Should *not* be more than the physical (available) number of cores of the machine
#
tasks=%(tasks)s
"""
        # only GCC is supported for now
        if self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
            hostcompile = os.getenv('F90')
            # optimizations
            opts = "-O2 -funroll-loops -ffast-math -ftree-vectorize -march=native -fno-inline-functions"
            # Depending on the get_version, we need extra options
            extra = ''
            gcc_version = LooseVersion(get_software_version('GCC'))
            if gcc_version >= LooseVersion('4.6'):
                # link-time optimization is only available from GCC 4.6
                extra = "-flto"
            targetcompile = "%s %s %s" % (hostcompile, opts, extra)
        else:
            # self.log.error raises in EasyBuild, aborting the build here
            self.log.error('No supported compiler found (tried GCC)')
        # try and find BLAS lib
        blas_found = False
        blas_libs = ["ACML", "ATLAS", "IMKL"]
        for blas_lib in blas_libs:
            if get_software_root(blas_lib):
                blas_found = True
            else:
                self.log.info("BLAS library %s not found" % blas_lib)
        if not blas_found:
            self.log.error('No known BLAS library found!')
        cfgdict = {
            'datatype': None,  # filled in per iteration below
            'transposeflavour': self.cfg['transpose_flavour'],
            'targetcompile': targetcompile,
            'hostcompile': hostcompile,
            'dims': ' '.join([str(d) for d in self.cfg['dims']]),
            'tiny_dims': ' '.join([str(d) for d in range(1, self.cfg['max_tiny_dim']+1)]),
            'tasks': self.cfg['parallel'],
            'LIBBLAS': "%s %s" % (os.getenv('LDFLAGS'), os.getenv('LIBBLAS'))
        }
        # configure for various iterations
        datatypes = [(1, 'double precision real'), (3, 'double precision complex')]
        for (dt, descr) in datatypes:
            cfgdict['datatype'] = dt
            try:
                txt = cfg_tpl % cfgdict
                # context manager guarantees the file is closed even on
                # write errors (the original open/write/close leaked on
                # failure)
                with open(fn, 'w') as fh:
                    fh.write(txt)
                self.log.debug("config file %s for datatype %s ('%s'): %s" % (fn, dt, descr, txt))
            except IOError as err:
                self.log.error("Failed to write %s: %s" % (fn, err))
            self.log.info("Building for datatype %s ('%s')..." % (dt, descr))
            run_cmd("./do_clean")
            run_cmd("./do_all")
    def install_step(self):
        """Install libsmm: clean, and copy lib directory to install dir"""
        run_cmd("./do_clean")
        try:
            shutil.copytree('lib', os.path.join(self.installdir, 'lib'))
        except Exception as err:
            self.log.error("Something went wrong during dir lib copying to installdir: %s" % err)
    def sanity_check_step(self):
        """Custom sanity check for libsmm"""
        custom_paths = {
            'files': ["lib/libsmm_%s.a" % x for x in ["dnn", "znn"]],
            'dirs': []
        }
        super(EB_libsmm, self).sanity_check_step(custom_paths=custom_paths)
|
JensTimmerman/easybuild-easyblocks
|
easybuild/easyblocks/l/libsmm.py
|
Python
|
gpl-2.0
| 8,618
|
[
"CP2K"
] |
5cb3c61b5813789094dd57d3e3a0bbacd8b5d79f59578ab9202d4b9d89a1aa0d
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 22:24:26 2016
@author: david
"""
# This is a configuration step for the exercise. Please run it before calculating the derivative!
import numpy as np
import matplotlib.pyplot as plt
# Function for setting up the Chebyshev derivative matrix
def get_cheby_matrix(nx):
    """Return the (nx+1) x (nx+1) Chebyshev differentiation matrix D.

    Applied to samples of f at the Chebyshev collocation points
    x_i = cos(pi*i/nx), the product D @ f approximates df/dx at those
    points.
    """
    cx = np.zeros(nx+1)
    x = np.zeros(nx+1)
    for ix in range(0, nx+1):
        x[ix] = np.cos(np.pi * ix / nx)
    # Standard c_i weights: 2 at the end points, 1 in the interior.
    cx[0] = 2.
    cx[nx] = 2.
    cx[1:nx] = 1.
    D = np.zeros((nx+1, nx+1))
    for i in range(0, nx+1):
        for j in range(0, nx+1):
            if i == j:
                # The corner diagonal entries are set in closed form
                # below; skipping them here avoids the division by zero
                # (x[i] - x[j] == 0) the original code performed before
                # overwriting the corners.
                if i != 0 and i != nx:
                    D[i, i] = -x[i]/(2.0*(1.0-x[i]*x[i]))
            else:
                D[i, j] = (cx[i]*(-1)**(i+j))/(cx[j]*(x[i]-x[j]))
    D[0, 0] = (2.*nx**2+1.)/6.
    D[nx, nx] = -D[0, 0]
    return D
# Initialize arbitrary test function on Chebyshev collocation points
nx = 199 # Number of grid points
x = np.zeros(nx+1)
for ix in range(0,nx+1):
    x[ix] = np.cos(ix * np.pi / nx)
# Extreme grid spacings (Chebyshev points cluster at the boundaries).
dxmin = min(abs(np.diff(x)))
dxmax = max(abs(np.diff(x)))
# Function example: Gaussian
# Width of Gaussian
s = .2
# Gaussian function (modify!)
f = np.exp(-1/s**2 * x**2)
# Analytical derivative
df_ana = -2/s**2 * x * np.exp(-1/s**2 * x**2)
# Calculate numerical derivative using differentiation matrix
# Initialize differentiation matrix
D = get_cheby_matrix(nx)
# Matrix-vector product approximates d/dx at the collocation points.
df_num = D @ f
# Calculate error between analytical and numerical solution
# (relative L2 misfit, expressed in percent)
err = np.sum((df_num - df_ana)**2) / np.sum(df_ana**2) * 100
# Plot analytical and numerical result
plt.plot(x,f,'b',label='f(x)')
plt.plot(x,df_num,'r',label='d/dx f(x) - numerical',alpha=0.6)
plt.plot(x,df_ana,'r--',label='d/dx f(x) - analytical')
plt.xlabel('x')
plt.ylabel('f(x) and d/df f(x)')
plt.title('Error: %s %%'%err)
plt.legend(loc='upper right')
plt.show()
|
davofis/computational_seismology
|
05_pseudospectral/cheby_derivative.py
|
Python
|
gpl-3.0
| 1,828
|
[
"Gaussian"
] |
cb1f318641a81180b944f10c351f8557c8c622935f194e8b12972987bfef8b04
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para yaske
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
# pelisalacarta channel descriptor fields.
__channel__ = "yaske"      # internal channel identifier
__category__ = "F"         # channel category code
__type__ = "generic"       # rendered with the generic channel UI
__title__ = "Yaske.net"    # display name
__language__ = "ES"        # content language
# Global debug flag, read from the plugin settings.
DEBUG = config.get_setting("debug")
def isGeneric():
    """Tell pelisalacarta this channel uses the generic renderer."""
    return True
def mainlist(item):
    """Build the channel's root menu."""
    logger.info("pelisalacarta.yaske mainlist")
    root_url = "http://www.yaske.net/"
    # (title, action) pairs for the site-backed menu entries.
    entries = [
        ("Novedades", "peliculas"),
        ("Por año", "menu_anyos"),
        ("Por género", "menu_categorias"),
        ("Por calidad", "menu_calidades"),
        ("Por idioma", "menu_idiomas"),
    ]
    itemlist = [Item(channel=__channel__, title=title, action=action,
                     url=root_url)
                for title, action in entries]
    # The search entry carries no URL; it prompts for a query instead.
    itemlist.append(Item(channel=__channel__, title="Buscar", action="search"))
    return itemlist
def search(item,texto):
    """Search the site for *texto*; returns results sorted by title.

    Any scraping error is logged and an empty list is returned, keeping
    the original best-effort behaviour.
    """
    logger.info("pelisalacarta.yaske search")
    itemlist = []
    try:
        item.url = "http://www.yaske.net/es/peliculas/search/%s" % texto
        item.extra = ""
        itemlist.extend(peliculas(item))
        # lambda parameter renamed so it no longer shadows the Item class
        itemlist = sorted(itemlist, key=lambda it: it.title)
        return itemlist
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; the redundant local 'import sys' was removed
        # ('sys' is already imported at module level).
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
def peliculas(item):
    """Scrape one listing page and return an Item per movie.

    Each movie title is decorated with the available audio languages and
    the video quality, e.g. "Title [Latino/English SUB Spanish][Dvd Rip]".
    When the page has a paginator, a ">> Página siguiente" item pointing
    to the next page is appended.
    """
    logger.info("pelisalacarta.yaske listado")
    # Download the listing page
    data = scrapertools.cache_page(item.url)
    # Extract the movie entries; sample of the HTML being matched:
    '''
    <li class="item-movies c5">
    <a class="image-block" href="http://www.yaske.net/es/pelicula/0002014/ver-sometimes-in-april-online.html" title="Siempre en abril">
    <img src="http://t0.gstatic.com/images?q=tbn:ANd9GcSpMMsdPI9tKkvdHUA2qPknXygXuHaISe7FRgYM85zvPZhr1tbWDA" width="140" height="200" />
    </a>
    <ul class="bottombox">
    <li title="Siempre en abril"><a href="http://www.yaske.net/es/pelicula/0002014/ver-sometimes-in-april-online.html" title="Siempre en abril">Siempre en abril</a></li>
    <li>Drama, Guerra, Historica</li>
    <li><img src='http://www.yaske.net/theme/01/data/images/flags/la_la.png' title='Latino ' width='25'/> <img src='http://www.yaske.net/theme/01/data/images/flags/en_es.png' title='English SUB Spanish' width='25'/> </li>
    <li><a rel="lyteframe" rev="width: 600px; height: 380px; scrolling: no;" youtube="trailer" href="http://www.youtube.com/v/XiteY6o2UwI&hl&autoplay=1" target="_blank"><img src="http://4.bp.blogspot.com/-_t9RtdUMJlo/UgYO_qA49VI/AAAAAAAABj4/7O_ZrYtIMHw/s1600/vertrailer2.png" height="22" border="0"></a></li>
    </ul>
    <div class="quality">Dvd Rip</div>
    <div class="view"><span>view: 10800</span></div>
    </li>
    '''
    # One match per movie card, capturing: url, title, thumbnail,
    # the <li> holding the language flag icons, and the quality label.
    patron = '<li class="item-movies[^<]+'
    patron += '<a class="image-block" href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<img src="([^"]+)" width="\d+" height="\d+"[^<]+'
    patron += '</a[^<]+'
    patron += '<ul class="bottombox"[^<]+'
    patron += '<li[^<]+<a[^<]+</a></li[^<]+'
    patron += '<li[^<]+</li[^<]+'
    patron += "<li>(.*?)</li[^<]+"
    patron += '<li><a[^<]+<img[^<]+</a></li[^<]+'
    patron += '</ul[^<]+'
    patron += '<div class="quality">([^<]+)<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail, idiomas, calidad in matches:
        # Collect the title attribute of every flag icon into "a/b/c",
        # then wrap it in brackets for display.
        patronidiomas = "<img src='[^']+' title='([^']+)'"
        matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(idiomas)
        idiomas_disponibles = ""
        for idioma in matchesidiomas:
            idiomas_disponibles = idiomas_disponibles + idioma.strip() + "/"
        if len(idiomas_disponibles)>0:
            idiomas_disponibles = "["+idiomas_disponibles[:-1]+"]"
        title = scrapedtitle.strip()+" "+idiomas_disponibles+"["+calidad+"]"
        title = scrapertools.htmlclean(title)
        url = scrapedurl
        thumbnail = scrapedthumbnail
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        # fulltitle keeps the clean title (no language/quality suffix).
        itemlist.append( Item(channel=__channel__, action="findvideos", title=title , url=url , thumbnail=thumbnail , plot=scrapedplot , fulltitle=scrapertools.htmlclean(scrapedtitle.strip()), viewmode="movie", folder=True) )
    # Extract the paginator ("»" link to the next page), if present.
    patronvideos = "<a href='([^']+)'>\»\;</a>"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Página siguiente" , url=scrapedurl , folder=True) )
    return itemlist
def menu_categorias(item):
    """Build the "by genre" submenu from the category block of the page."""
    logger.info("pelisalacarta.yaske menu_categorias")
    # Download the page and narrow it down to the category <ul> block.
    page = scrapertools.cache_page(item.url)
    logger.info("data=" + page)
    page = scrapertools.get_match(page, 'div class="title">Categoria[^<]+</div>(.*?)</ul>')
    logger.info("data=" + page)
    # One (url, title) pair per genre link.
    entries = re.findall("<li><a href='([^']+)'><img[^>]+>([^<]+)</a>", page, re.DOTALL)
    itemlist = []
    for scrapedurl, scrapedtitle in entries:
        scrapedthumbnail = ""
        scrapedplot = ""
        if DEBUG:
            logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle,
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             plot=scrapedplot, folder=True))
    return itemlist
def menu_idiomas(item):
    """Build the "by language" submenu from the page's language selector."""
    logger.info("pelisalacarta.yaske menu_idiomas")
    # Download the page and isolate the <select name="language"> element.
    page = scrapertools.cache_page(item.url)
    logger.info("data=" + page)
    page = scrapertools.get_match(page, '<select name="language"(.*?)</select>')
    logger.info("data=" + page)
    # One (value, label) pair per <option>; the value may be empty.
    options = re.findall("<option value='([^']*)'>([^<]+)</option>", page, re.DOTALL)
    itemlist = []
    for scrapedurl, scrapedtitle in options:
        scrapedthumbnail = ""
        scrapedplot = ""
        url = "http://www.yaske.net/es/peliculas/custom/?year=&gender=&quality=&language=" + scrapedurl
        if DEBUG:
            logger.info("title=["+scrapedtitle+"], url=["+url+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle,
                             url=url, thumbnail=scrapedthumbnail,
                             plot=scrapedplot, folder=True))
    # Subtitled movies are reached through a fixed pseudo-language value.
    url = "http://www.yaske.net/es/peliculas/custom/?year=&gender=&quality=&language=sub"
    itemlist.append(Item(channel=__channel__, action="peliculas", title="Subtitulado", url=url, folder=True))
    return itemlist
def menu_anyos(item):
    """Build the "by year" submenu from the page's year selector."""
    logger.info("pelisalacarta.yaske menu_anyos")
    # Download the page and isolate the <select name="year"> element.
    page = scrapertools.cache_page(item.url)
    logger.info("data=" + page)
    page = scrapertools.get_match(page, '<select name="year"(.*?)</select>')
    logger.info("data=" + page)
    # One (value, label) pair per non-empty <option>.
    options = re.findall("<option value='([^']+)'>([^<]+)</option>", page, re.DOTALL)
    itemlist = []
    for scrapedurl, scrapedtitle in options:
        scrapedthumbnail = ""
        scrapedplot = ""
        url = "http://www.yaske.net/es/peliculas/custom/?year=" + scrapedurl + "&gender=&quality=&language="
        if DEBUG:
            logger.info("title=["+scrapedtitle+"], url=["+url+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle,
                             url=url, thumbnail=scrapedthumbnail,
                             plot=scrapedplot, folder=True))
    return itemlist
def menu_calidades(item):
    """Build the "by quality" submenu from the page's quality selector.

    The scraped block looks like:
    <select name="quality" id="qualities" class="jqlist">
      <option value='c8'>hd real 720</option> ... <option value='c1'>cam</option>
    </select>
    """
    logger.info("pelisalacarta.yaske menu_calidades")
    page = scrapertools.cache_page(item.url)
    logger.info("data=" + page)
    page = scrapertools.get_match(page, '<select name="quality"(.*?)</select>')
    logger.info("data=" + page)
    # One (value, label) pair per non-empty <option>.
    options = re.findall("<option value='([^']+)'>([^<]+)</option>", page, re.DOTALL)
    itemlist = []
    for scrapedurl, scrapedtitle in options:
        scrapedthumbnail = ""
        scrapedplot = ""
        url = "http://www.yaske.net/es/peliculas/custom/?year=&gender=&quality=" + scrapedurl + "&language="
        if DEBUG:
            logger.info("title=["+scrapedtitle+"], url=["+url+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle,
                             url=url, thumbnail=scrapedthumbnail,
                             plot=scrapedplot, folder=True))
    return itemlist
def findvideos(item):
    """Scrape the movie page and return one playable Item per mirror row.

    Each table row carries the option label, the hosting server (taken
    from the favicon URL), the audio-language flag, optional subtitle
    info and the quality; these are combined into the displayed title.
    Rows that fail to parse are logged and skipped (best-effort).
    """
    logger.info("pelisalacarta.yaske findvideos url="+item.url)
    # Download the movie page
    data = scrapertools.cache_page(item.url)
    # Extract the mirror rows; sample of the HTML being matched:
    '''
    <tr bgcolor="">
    <td height="32" align="center"><a class="btn btn-mini enlace_link" style="text-decoration:none;" rel="nofollow" target="_blank" title="Ver..." href="http://www.yaske.net/es/reproductor/pelicula/2141/44446/"><i class="icon-play"></i><b> Opcion 04</b></a></td>
    <td align="left"><img src="http://www.google.com/s2/favicons?domain=played.to"/>played</td>
    <td align="center"><img src="http://www.yaske.net/theme/01/data/images/flags/la_la.png" width="21">Lat.</td>
    <td align="center" class="center"><span title="" style="text-transform:capitalize;">hd real 720</span></td>
    <td align="center"><div class="star_rating" title="HD REAL 720 ( 5 de 5 )">
    <ul class="star"><li class="curr" style="width: 100%;"></li></ul>
    </div>
    </td> <td align="center" class="center">2553</td> </tr>
    '''
    patron = '<tr bgcolor=(.*?)</tr>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    itemlist = []
    for tr in matches:
        logger.info("tr="+tr)
        try:
            # Option label, e.g. " Opcion 04".
            title = scrapertools.get_match(tr,'<b>([^<]+)</b>')
            # Hosting server, parsed out of the Google favicon-service URL.
            server = scrapertools.get_match(tr,'"http\://www.google.com/s2/favicons\?domain\=([^"]+)"')
            # <td align="center"><img src="http://www.yaske.to/theme/01/data/images/flags/la_la.png" width="19">Lat.</td>
            # Flag file name encodes audio/subtitle language, e.g. "la_la".
            idioma = scrapertools.get_match(tr,'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/([a-z_]+).png"[^>]+>[^<]*<')
            subtitulos = scrapertools.get_match(tr,'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/[^"]+"[^>]+>([^<]*)<')
            calidad = scrapertools.get_match(tr,'<td align="center" class="center"[^<]+<span title="[^"]*" style="text-transform.capitalize.">([^<]+)</span></td>')
            #<a href="http://www.yaske.net/es/reproductor/pelicula/2244/15858/" title="Batman: El regreso del Caballero Oscuro, Parte 2"
            url = scrapertools.get_match(tr,'<a.*?href="([^"]+)"')
            thumbnail = ""
            plot = ""
            # NOTE(review): this strips ALL spaces from the label
            # ("Opcion 04" -> "Opcion04") — presumably intentional; confirm.
            title = title.replace(" ","")
            # Map the flag code to a human-readable language tag.
            if "es_es" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [Español]["+calidad+"]"
            elif "la_la" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [Latino]["+calidad+"]"
            elif "en_es" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [Inglés SUB Español]["+calidad+"]"
            else:
                scrapedtitle = title + " en "+server.strip()+" ["+idioma+" / "+subtitulos+"]["+calidad+"]"
            scrapedtitle = scrapertools.entityunescape(scrapedtitle)
            scrapedtitle = scrapedtitle.strip()
            scrapedurl = url
            scrapedthumbnail = thumbnail
            scrapedplot = plot
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , fulltitle=item.fulltitle, folder=False) )
        except:
            # Best-effort: a malformed row is logged and skipped, the rest
            # of the mirrors are still returned.
            import traceback
            logger.info("Excepcion: "+traceback.format_exc())
    return itemlist
def play(item):
    """Resolve the selected mirror page into playable video items."""
    logger.info("pelisalacarta.yaske play item.url="+item.url)
    # adf.ly URLs are shorteners; expand them to the real target first.
    if item.url.startswith("http://adf.ly"):
        from servers import adfly
        item.url = adfly.get_long_url(item.url)
    page = scrapertools.downloadpageGzip(item.url)
    #logger.info("data="+page)
    # Rewrite the site's allmyvideos proxy URLs into direct links so the
    # generic server detector can recognise them.
    page = page.replace("http://www.yaske.net/archivos/allmyvideos/play.php?v=", "http://allmyvideos.net/")
    videoitems = servertools.find_video_items(data=page)
    # Propagate the movie's clean title to every detected video item.
    for videoitem in videoitems:
        videoitem.fulltitle = item.fulltitle
    return videoitems
# Automatic channel check: must return True when the channel works.
def test():
    """Accept the channel if any movie under "Novedades" yields mirrors."""
    from servers import servertools
    # mainlist -> first entry is "Novedades" -> list its movies.
    main_items = mainlist(Item())
    movie_items = peliculas(main_items[0])
    for movie_item in movie_items:
        mirrors = findvideos(item=movie_item)
        if len(mirrors) > 0:
            return True
    return False
|
golaizola/pelisalacarta-xbmc
|
pelisalacarta/channels/yaske.py
|
Python
|
gpl-3.0
| 14,531
|
[
"ADF"
] |
b5e49c060db5c7cfcc7983c096ba81b1f2b582336c839349cd1dc08e41e8ce98
|
from ddt import ddt, data, unpack
from .base_test import MentoringBaseTest
# CSS result-state suffixes (".checkmark-<state>") asserted by the tests below.
CORRECT, INCORRECT, PARTIAL = "correct", "incorrect", "partially-correct"
@ddt
class MentoringAssessmentTest(MentoringBaseTest):
    """Integration tests for the assessment flow of the mentoring block.

    Walks through a multi-question assessment (freeform, single-choice,
    rating and multiple-response questions), checking the state of the
    navigation controls, the checkmark feedback after each submission and
    the final review screen, including the extended-feedback variant.

    Fixes vs. the previous revision: a second, byte-identical definition
    of ``assert_disabled`` that silently shadowed the first was removed,
    a dead local in ``_GetChoices.select`` was dropped, and the deprecated
    ``assertEquals`` alias was normalised to ``assertEqual``.
    """

    def _selenium_bug_workaround_scroll_to(self, mentoring):
        """Workaround for selenium bug:

        Some version of Selenium has a bug that prevents scrolling
        to radiobuttons before being clicked. The click not taking
        place, when it's outside the view.

        Since the bug does not affect other content, asking Selenium
        to click on the legend first, will properly scroll it.

        It also has its fair share of issues with the workbench header.

        For this reason we click on the bottom-most element, scrolling to it.
        Then, click on the title of the question (also scrolling to it)
        hopefully, this gives us enough room for the full step with the
        control buttons to fit.
        """
        controls = mentoring.find_element_by_css_selector("div.submit")
        title = mentoring.find_element_by_css_selector("h3.question-title")
        controls.click()
        title.click()

    def assert_hidden(self, elem):
        """Assert that the element is not displayed."""
        self.assertFalse(elem.is_displayed())

    def assert_disabled(self, elem):
        """Assert that the element is visible but not enabled."""
        self.assertTrue(elem.is_displayed())
        self.assertFalse(elem.is_enabled())

    def assert_clickable(self, elem):
        """Assert that the element is visible and enabled."""
        self.assertTrue(elem.is_displayed())
        self.assertTrue(elem.is_enabled())

    def assert_persistent_elements_present(self, mentoring):
        """Assert the elements shared by all assessment steps are shown."""
        self.assertIn("A Simple Assessment", mentoring.text)
        self.assertIn("This paragraph is shared between all questions.", mentoring.text)

    class _GetChoices(object):
        """Helper wrapping the choice list of an MCQ/MRQ question."""

        def __init__(self, mentoring, selector=".choices"):
            self._mcq = mentoring.find_element_by_css_selector(selector)

        @property
        def text(self):
            """Full visible text of the choices block."""
            return self._mcq.text

        @property
        def state(self):
            """Mapping of choice label -> whether its input is selected."""
            return {
                choice.text: choice.find_element_by_css_selector("input").is_selected()
                for choice in self._mcq.find_elements_by_css_selector(".choice")}

        def select(self, text):
            """Click the choice whose label equals ``text``; fail if absent."""
            for choice in self._mcq.find_elements_by_css_selector(".choice"):
                if choice.text == text:
                    choice.find_element_by_css_selector("input").click()
                    return
            raise AssertionError("Expected selectable item present: {}".format(text))

    def _assert_checkmark(self, mentoring, result):
        """Assert that only the desired checkmark is present."""
        states = {CORRECT: 0, INCORRECT: 0, PARTIAL: 0}
        states[result] += 1
        for name, count in states.items():
            self.assertEqual(len(mentoring.find_elements_by_css_selector(".checkmark-{}".format(name))), count)

    def go_to_workbench_main_page(self):
        """Navigate the browser back to the workbench index page."""
        self.browser.get(self.live_server_url)

    def go_to_assessment(self, number):
        """Open the 'Assessment <number>' scenario and collect its controls.

        Returns a (mentoring, controls) pair, where ``controls`` is a bare
        namespace holding the submit / next / review / try-again elements.
        """
        mentoring = self.go_to_page('Assessment %s' % number)

        class Namespace(object):
            pass

        controls = Namespace()
        controls.submit = mentoring.find_element_by_css_selector("input.input-main")
        controls.next_question = mentoring.find_element_by_css_selector("input.input-next")
        controls.review = mentoring.find_element_by_css_selector("input.input-review")
        controls.try_again = mentoring.find_element_by_css_selector("input.input-try-again")
        controls.review_link = mentoring.find_element_by_css_selector(".review-link a")
        return mentoring, controls

    @staticmethod
    def question_text(number):
        """Return the heading text for question ``number`` (falsy -> no number)."""
        if number:
            return "QUESTION %s" % number
        else:
            return "QUESTION"

    def freeform_answer(self, number, mentoring, controls, text_input, result, saved_value="", last=False):
        """Answer the freeform question, submit it and check the feedback."""
        self.wait_until_text_in(self.question_text(number), mentoring)
        self.assert_persistent_elements_present(mentoring)
        self._selenium_bug_workaround_scroll_to(mentoring)

        answer = mentoring.find_element_by_css_selector("textarea.answer.editable")

        self.assertIn("Please answer the questions below.", mentoring.text)
        self.assertIn(self.question_text(number), mentoring.text)
        self.assertIn("What is your goal?", mentoring.text)

        # A previously saved answer should be pre-filled in the textarea.
        self.assertEqual(saved_value, answer.get_attribute("value"))
        if not saved_value:
            self.assert_disabled(controls.submit)
        self.assert_disabled(controls.next_question)

        answer.clear()
        answer.send_keys(text_input)
        self.assertEqual(text_input, answer.get_attribute("value"))

        self.assert_clickable(controls.submit)
        self.ending_controls(controls, last)
        self.assert_hidden(controls.review)
        self.assert_hidden(controls.try_again)

        controls.submit.click()
        self.do_submit_wait(controls, last)
        self._assert_checkmark(mentoring, result)
        self.do_post(controls, last)

    def ending_controls(self, controls, last):
        """Check next/review state before submission, depending on position."""
        if last:
            self.assert_hidden(controls.next_question)
            self.assert_disabled(controls.review)
        else:
            self.assert_disabled(controls.next_question)
            self.assert_hidden(controls.review)

    def selected_controls(self, controls, last):
        """Check control state once an answer has been selected."""
        self.assert_clickable(controls.submit)
        if last:
            self.assert_hidden(controls.next_question)
            self.assert_disabled(controls.review)
        else:
            self.assert_disabled(controls.next_question)
            self.assert_hidden(controls.review)

    def do_submit_wait(self, controls, last):
        """Wait for the post-submit navigation control to become clickable."""
        if last:
            self.wait_until_clickable(controls.review)
        else:
            self.wait_until_clickable(controls.next_question)

    def do_post(self, controls, last):
        """Advance to the next question, or to the review on the last one."""
        if last:
            controls.review.click()
        else:
            controls.next_question.click()

    def single_choice_question(self, number, mentoring, controls, choice_name, result, last=False):
        """Answer the MCQ question, submit it and check the feedback."""
        self.wait_until_text_in(self.question_text(number), mentoring)
        self.assert_persistent_elements_present(mentoring)
        self._selenium_bug_workaround_scroll_to(mentoring)

        self.assertIn("Do you like this MCQ?", mentoring.text)

        self.assert_disabled(controls.submit)
        self.ending_controls(controls, last)
        self.assert_hidden(controls.try_again)

        choices = self._GetChoices(mentoring)
        expected_state = {"Yes": False, "Maybe not": False, "I don't understand": False}
        self.assertEqual(choices.state, expected_state)

        choices.select(choice_name)
        expected_state[choice_name] = True
        self.assertEqual(choices.state, expected_state)

        self.selected_controls(controls, last)

        controls.submit.click()
        self.do_submit_wait(controls, last)
        self._assert_checkmark(mentoring, result)
        self.do_post(controls, last)

    def rating_question(self, number, mentoring, controls, choice_name, result, last=False):
        """Answer the rating question, submit it and check the feedback."""
        self.wait_until_text_in(self.question_text(number), mentoring)
        self.assert_persistent_elements_present(mentoring)
        self._selenium_bug_workaround_scroll_to(mentoring)

        self.assertIn("How much do you rate this MCQ?", mentoring.text)

        self.assert_disabled(controls.submit)
        self.ending_controls(controls, last)
        self.assert_hidden(controls.review)
        self.assert_hidden(controls.try_again)

        choices = self._GetChoices(mentoring, ".rating")
        expected_choices = {
            "1 - Not good at all": False,
            "2": False, "3": False, "4": False,
            "5 - Extremely good": False,
            "I don't want to rate it": False,
        }
        self.assertEqual(choices.state, expected_choices)
        choices.select(choice_name)
        expected_choices[choice_name] = True
        self.assertEqual(choices.state, expected_choices)

        self.ending_controls(controls, last)

        controls.submit.click()
        self.do_submit_wait(controls, last)
        self._assert_checkmark(mentoring, result)
        self.do_post(controls, last)

    def peek_at_multiple_response_question(
        self, number, mentoring, controls, last=False, extended_feedback=False, alternative_review=False,
    ):
        """Inspect the MRQ step's controls without answering it."""
        self.wait_until_text_in(self.question_text(number), mentoring)
        self.assert_persistent_elements_present(mentoring)
        self._selenium_bug_workaround_scroll_to(mentoring)
        self.assertIn("What do you like in this MRQ?", mentoring.text)

        if extended_feedback:
            self.assert_disabled(controls.submit)
            if alternative_review:
                self.assert_clickable(controls.review_link)
                self.assert_hidden(controls.try_again)
            else:
                self.assert_clickable(controls.review)
        else:
            self.assert_disabled(controls.submit)
            self.ending_controls(controls, last)

    def multiple_response_question(self, number, mentoring, controls, choice_names, result, last=False):
        """Answer the MRQ question, submit it and go to the review."""
        self.peek_at_multiple_response_question(number, mentoring, controls, last=last)

        choices = self._GetChoices(mentoring)
        expected_choices = {
            "Its elegance": False,
            "Its beauty": False,
            "Its gracefulness": False,
            "Its bugs": False,
        }
        self.assertEqual(choices.state, expected_choices)

        for name in choice_names:
            choices.select(name)
            expected_choices[name] = True
        self.assertEqual(choices.state, expected_choices)

        self.selected_controls(controls, last)

        controls.submit.click()
        self.do_submit_wait(controls, last)
        self._assert_checkmark(mentoring, result)
        controls.review.click()

    def peek_at_review(self, mentoring, controls, expected, extended_feedback=False):
        """Check the review screen against the ``expected`` score summary."""
        self.wait_until_text_in("You scored {percentage}% on this assessment".format(**expected), mentoring)
        self.assert_persistent_elements_present(mentoring)
        # Per-question review lists only appear once all attempts are used
        # and extended feedback is enabled.
        if expected["num_attempts"] < expected["max_attempts"]:
            self.assertIn("Note: if you retake this assessment, only your final score counts", mentoring.text)
            self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
        elif extended_feedback:
            for q_type in ['correct', 'incorrect', 'partial']:
                self.assertEqual(len(mentoring.find_elements_by_css_selector('.%s-list li' % q_type)), expected[q_type])
        else:
            self.assertFalse(mentoring.find_elements_by_css_selector('.review-list'))
        self.assertIn("You answered {correct} questions correctly".format(**expected), mentoring.text)
        self.assertIn("You answered {partial} questions partially correct".format(**expected), mentoring.text)
        self.assertIn("You answered {incorrect} questions incorrectly".format(**expected), mentoring.text)
        self.assertIn("You have used {num_attempts} of {max_attempts} submissions".format(**expected), mentoring.text)
        self.assert_hidden(controls.submit)
        self.assert_hidden(controls.next_question)
        self.assert_hidden(controls.review)
        self.assert_hidden(controls.review_link)

    def assert_messages_text(self, mentoring, text):
        """Assert the assessment feedback message equals ``text`` and is shown."""
        messages = mentoring.find_element_by_css_selector('.assessment-messages')
        self.assertEqual(messages.text, text)
        self.assertTrue(messages.is_displayed())

    def assert_messages_empty(self, mentoring):
        """Assert the assessment feedback message area is empty and hidden."""
        messages = mentoring.find_element_by_css_selector('.assessment-messages')
        self.assertEqual(messages.text, '')
        self.assertFalse(messages.find_elements_by_xpath('./*'))
        self.assertFalse(messages.is_displayed())

    def extended_feedback_checks(self, mentoring, controls, expected_results):
        """Exercise the per-question navigation of the extended review."""
        # Multiple choice is third correctly answered question
        self.assert_hidden(controls.review_link)
        mentoring.find_elements_by_css_selector('.correct-list li a')[2].click()
        self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)

        # Four correct items, plus the overall correct indicator.
        correct_marks = mentoring.find_elements_by_css_selector('.checkmark-correct')
        incorrect_marks = mentoring.find_elements_by_css_selector('.checkmark-incorrect')
        self.assertEqual(len(correct_marks), 5)
        self.assertEqual(len(incorrect_marks), 0)

        item_feedbacks = [
            "This is something everyone has to like about this MRQ",
            "This is something everyone has to like about this MRQ",
            "This MRQ is indeed very graceful",
            "Nah, there aren't any!"
        ]
        self.popup_check(mentoring, item_feedbacks, do_submit=False)
        self.assert_hidden(controls.review)
        self.assert_disabled(controls.submit)
        controls.review_link.click()
        self.peek_at_review(mentoring, controls, expected_results, extended_feedback=True)

        # Rating question, right before MRQ.
        mentoring.find_elements_by_css_selector('.incorrect-list li a')[0].click()
        # Should be possible to visit the MRQ from there.
        self.wait_until_clickable(controls.next_question)
        controls.next_question.click()
        self.peek_at_multiple_response_question(4, mentoring, controls, extended_feedback=True, alternative_review=True)

    @data((1, False), ('Extended Feedback', True))
    @unpack
    def test_assessment(self, assessment, extended_feedback):
        """Run the full 4-question assessment twice, then check the review."""
        mentoring, controls = self.go_to_assessment(assessment)

        self.freeform_answer(1, mentoring, controls, 'This is the answer', CORRECT)
        self.single_choice_question(2, mentoring, controls, 'Maybe not', INCORRECT)
        self.rating_question(3, mentoring, controls, "5 - Extremely good", CORRECT)
        self.peek_at_multiple_response_question(4, mentoring, controls, last=True)

        # see if assessment remembers the current step
        self.go_to_workbench_main_page()
        mentoring, controls = self.go_to_assessment(assessment)

        self.multiple_response_question(4, mentoring, controls, ("Its beauty",), PARTIAL, last=True)

        expected_results = {
            "correct": 2, "partial": 1, "incorrect": 1, "percentage": 63,
            "num_attempts": 1, "max_attempts": 2}
        self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
        self.assert_messages_text(mentoring, "Assessment additional feedback message text")
        self.assert_clickable(controls.try_again)
        controls.try_again.click()

        # Second attempt: saved freeform answer should be restored.
        self.freeform_answer(1, mentoring, controls, 'This is a different answer', CORRECT,
                             saved_value='This is the answer')
        self.single_choice_question(2, mentoring, controls, 'Yes', CORRECT)
        self.rating_question(3, mentoring, controls, "1 - Not good at all", INCORRECT)

        user_selection = ("Its elegance", "Its beauty", "Its gracefulness")
        self.multiple_response_question(4, mentoring, controls, user_selection, CORRECT, last=True)

        expected_results = {
            "correct": 3, "partial": 0, "incorrect": 1, "percentage": 75,
            "num_attempts": 2, "max_attempts": 2}
        self.peek_at_review(mentoring, controls, expected_results, extended_feedback=extended_feedback)
        self.assert_disabled(controls.try_again)
        self.assert_messages_empty(mentoring)
        if extended_feedback:
            self.extended_feedback_checks(mentoring, controls, expected_results)

    def test_single_question_assessment(self):
        """
        No 'Next Question' button on single question assessment.
        """
        mentoring, controls = self.go_to_assessment(2)
        self.single_choice_question(0, mentoring, controls, 'Maybe not', INCORRECT, last=True)

        expected_results = {
            "correct": 0, "partial": 0, "incorrect": 1, "percentage": 0,
            "num_attempts": 1, "max_attempts": 2}
        self.peek_at_review(mentoring, controls, expected_results)
        self.assert_messages_empty(mentoring)
        controls.try_again.click()
        # this is a wait and assertion all together - it waits until expected text is in mentoring block
        # and it fails with PrmoiseFailed exception if it's not
        self.wait_until_text_in(self.question_text(0), mentoring)
|
open-craft/xblock-mentoring
|
tests/integration/test_assessment.py
|
Python
|
agpl-3.0
| 16,811
|
[
"VisIt"
] |
ac64306d39b0f2bdeb6720e0c395a8510e04dfb7462619422a5b29dd8a479da2
|
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy import (
sqrt,
minimum,
)
try:
_ = np.use_fastnumpy # Use Enthought MKL optimizations
from numpy.fft import rfft, irfft, rfftfreq
except AttributeError:
try:
import mklfft # MKL FFT optimizations from Continuum Analytics
from numpy.fft import rfft, irfft, rfftfreq
except ImportError:
# Finally, just use Numpy's and Scipy's
from numpy.fft import rfft, irfft, rfftfreq
from scipy.signal import fftconvolve
class EC(object):
"""Equalization-Cancellation process used by the STEC model [wan2014]_.
The `equalize` method finds the optimal gains and delays that minimizes
the energy of the cancelled signal.
The `cancel` method uses the gains and delays found by the `equalize`
method to "cancel" the two signals.
The `jitter` method applies amplitude and time jitters to the input as a
form of internal noise.
Examples
--------
>>> ec = EC()
>>> alphas, taus = ec.equalize(left, right, cf)
>>> y = ec.cancel(left, right, alphas, taus)
References
----------
.. [wan2014] Wan, R., Durlach, N. I., and Colburn, H. S. (2014).
"Application of a short-time version of the Equalization-Cancellation
model to speech intelligibility experiments with speech maskers",
The Journal of the Acoustical Society of America, 136(2), 768--776
"""
def __init__(self, fs, win_len=None, overlap=0.5, sigma_e=0.25,
sigma_d=105e-6, padding_windows=10, fast_cancel=True):
"""Equalization--Cancellation process.
Parameters
-----------
fs : int
Sampling frequency of the EC process.
win_len : float
Duration of a window, in seconds, where to apply the EC process. If
`None`, the EC process is applied to the whole signal. Defaults to
`None`.
overlap : float
Overlap between windows, in fraction of window. Defaults to 0.5 (i.e.
50%).
sigma_e : float
Mean value of the amplitude jitter in the EC process. Default is 0.25
as reported by Durlach (1963).
sigma_d : float
Mean duration of the time jitter. Default is 105us, as reported by
Durlach (1963).
"""
self.fs = fs
self.win_len = win_len
self.overlap = overlap
self.sigma_e = sigma_e
self.sigma_d = sigma_d
self.padding_windows = padding_windows
self.fast_cancel = fast_cancel
def equalize(self, left, right, cf):
"""Finds the optimal gains and delays that minimize the energy of the
cancelled signals.
Parameters
----------
left, right : ndarrays
Signals for which to find the optimal parameters. They can be 1D
or 2D. If they are 2D, the signals are cancelled along the last
dimension.
cf : float or list of floats
Center frequency of the channel at which the equalization takes
place. If the inputs are multi-channel, then cf must be a list of
center frequencies.
Returns
-------
alphas : ndarray
Optimal gains. The shape depends on the input signals and on the
`win_len` and ``overlap`` attributes.
taus : ndarrays
Optimal delays in seconds. The shape depends on the input signals
and on the ``win_len`` and `overlap` attributes.
"""
left = np.asanyarray(left, dtype='float')
right = np.asanyarray(right, dtype='float')
if left.ndim > 2 or right.ndim > 2:
raise ValueError("Input signals must have at most 2 dimensions.",
left.ndim, right.ndim)
if left.shape != right.shape:
raise ValueError("Both inputs must have must have the same shape.",
left.shape, right.shape)
if left.ndim == 2:
try:
if len(cf) != left.shape[0]:
raise ValueError("cf must have as many values as there "
"are channels in the inputs.")
except TypeError:
raise ValueError("cf must be iterable if there are more than "
"one channel.")
if left.ndim == 1 and right.ndim == 1:
# Use the whole signal.
alphas, taus = self._equalization(left, right, cf)
else: # the inputs are 2D
alphas = []
taus = []
for i_chan, cf in enumerate(cf):
chan_alphas, chan_taus = self._equalization(
left[i_chan], right[i_chan], cf)
alphas.append(chan_alphas)
taus.append(chan_taus)
alphas = np.array(alphas)
taus = np.asarray(taus)
return alphas, taus
def _equalization(self, left, right, cf):
"""Equalize two signals.
Parameters
----------
left, right: array
Single dimension array for left and right signal.
cf : float
Center frequency at which the equalization takes place.
Returns
-------
alphas : array
Gains for each window.
taus : array
Time delays for each window, in seconds.
Notes
-----
The window duration is set by the attribute ``win_len``, in seconds,
and the overlap between windows by ``overlap``, in fraction (e.g. 0.5
for 50 % overlap).
"""
n = left.shape[-1]
if self.win_len is None:
win = n
step = n
else:
win = int(self.win_len * self.fs)
if self.overlap:
step = int(win * self.overlap)
else:
step = win
n_valid_windows = self._n_valid_windows(n, win, step)
alphas = np.zeros(n_valid_windows)
taus = np.zeros(n_valid_windows)
for i_frame, hop in enumerate(range(0, n - win + 1, step)):
a, tau = self._calculate_alpha_tau(left[hop:hop + win],
right[hop:hop + win], cf=cf)
alphas[i_frame] = a
taus[i_frame] = tau
return alphas, taus
@staticmethod
def _n_valid_windows(n_samples, win_len, step):
"""Calculate the number of valid windows, considering overlap.
Parameters
----------
n_samples : int
Length of vector.
win_len : int
Window length, in samples.
step : int
Number of samples between frames, essentially overlap * window
length.
Returns
-------
n_windows : int
Number of valid windows.
"""
valid = np.maximum(n_samples - win_len, 0)
n_windows = valid // step
return n_windows + 1
def _calculate_alpha_tau(self, left, right, cf):
"""Finds optimal parameters for the EC process.
Performs equations (1) in Wan et al. (2014).
Parameters
----------
left, right : ndarray
w : float
Center frequency of the channel, in Hz.
Returns
-------
a : float
Level equalization parameter
tau : float
Delay, in seconds, that should be applied to `right` in order to
get close too `left`. Could also be explained as the delay
applied to `left`, with respect to `right`.
"""
E_L = left.dot(left.T)
E_R = right.dot(right.T)
# Alpha parameter for level equalization
alpha = sqrt(E_L / E_R)
tau = self._find_tau(left, right, cf)
return alpha, tau
def _find_tau(self, left, right, cf):
""" Returns the delay (in seconds) of the maximum of the cross-correlation
of two signals.
"""
left = np.asanyarray(left)
right = np.asanyarray(right)
left = left - np.mean(left)
right = right - np.mean(right)
if left.dot(left) == 0 or right.dot(right) == 0:
return 0
else:
n_samples = left.shape[-1]
# Cross correlation
# It should be normalized, according to the definition, but
# we only need the max value, so it is not necessary to compute it.
rho = fftconvolve(left, right[::-1], 'full')
# Eq 6, we have to find tau_0 in the range where |tau| < fs / cf_0
# i.e. (pi / omega_0)
max_delay_in_samples = minimum(
np.floor(np.pi / (2 * np.pi * cf) * self.fs),
n_samples // 2)
# First we limit the range to -fs/cf_0 < tau < fs/cf_0...
allowed_range = np.arange(-max_delay_in_samples,
max_delay_in_samples + 1, dtype=int)
# ... then we find where the maximum is that range.
tau = allowed_range[rho[allowed_range + n_samples - 1].argmax()]
return tau / self.fs
def cancel(self, left, right, alpha, tau):
"""Cancel left and right signal using gains and delays.
Parameters
----------
left, right : array_like
Signals for which to find the optimal parameters. They can be 1D
or 2D. If they are 2D, the signals are cancelled along the last
dimension.
alpha : ndarray
Optimal amplitude cancellation gains.
tau : ndarray
Optimal cancellation delays.
Returns
-------
y : ndarray
"""
left = np.asanyarray(left, dtype='float')
right = np.asanyarray(right, dtype='float')
alpha = np.asanyarray(alpha)
tau = np.asanyarray(tau)
if left.ndim > 2 or right.ndim > 2:
raise ValueError("Input signals must have at most 2 dimensions.",
left.ndim, right.ndim)
if left.shape != right.shape:
raise ValueError("Both inputs must have must have the same shape.",
left.shape, right.shape)
if left.ndim == 1 and right.ndim == 1:
out = self._single_chan_cancel(left, right, alpha, tau)
else: # the inputs are 2D
out = np.zeros_like(left)
for i_chan, (chan_alpha, chan_tau) in enumerate(zip(alpha, tau)):
out[i_chan, :] = self._single_chan_cancel(
left[i_chan],
right[i_chan],
chan_alpha,
chan_tau)
return out
    def _single_chan_cancel(self, left, right, alphas, taus):
        """Equalize two signals.

        Parameters
        ----------
        left, right : ndarrays
            Single dimension array for left and right signal.
        alphas : ndarray
            Gains for each window.
        taus : ndarray
            Time delays for each window, in samples.

        Returns
        -------
        out : ndarray
            Cancelled signal.

        Notes
        -----
        The window duration is set by the attribute `win_len`, in samples,
        and the overlap between windows by `overlap`, in fraction (e.g. 0.5
        for 50 % overlap). With `win_len` of None the whole signal is one
        window.
        """
        n = left.shape[-1]
        if self.win_len is None:
            # Whole signal processed as a single window.
            win = n
            step = n
            # Make sure the alphas and taus are iterable.
            try:
                iter(alphas)
            except TypeError:
                alphas = (alphas,)
            try:
                iter(taus)
            except TypeError:
                taus = (taus,)
        else:
            win = int(self.win_len * self.fs)
            if self.overlap:
                step = int(win * self.overlap)
            else:
                step = win
        out = np.zeros_like(left)
        # Extra samples taken on each side of a window when `fast_cancel`
        # shifts only a local slice of the signal.
        extra = self.padding_windows * win
        for i_frame, (a, tau, hop) in enumerate(
                zip(alphas, taus, range(0, n - win + 1, step))):
            if tau == 0:
                # No delay: plain gain-weighted subtraction.
                out[hop:hop + win] += 1 / sqrt(a) * left[hop:hop + win] \
                    - sqrt(a) * right[hop:hop + win]
            else:
                if self.fast_cancel:
                    # Shift only a section of the signal, instead of its
                    # entirety. The "window" size is defined by the
                    # `padding_windows` parameter. The size of the original
                    # window is increased by 2*padding_windows (one before,
                    # one after).
                    lower = np.maximum(hop - extra, 0)
                    # `new_hop` is the window's offset inside the padded
                    # slice (hop - lower).
                    if lower == 0:
                        new_hop = hop
                    else:
                        new_hop = extra
                    upper = np.minimum(hop + win + extra, n)
                    out[hop:hop + win] += (
                        1 / sqrt(a) * self._shift(left[lower:upper], -tau / 2)
                        - sqrt(a) * self._shift(right[lower:upper], tau / 2)
                    )[new_hop:new_hop + win]
                else:
                    # Shift the full signals, then take the window.
                    out[hop:hop + win] += 1 / sqrt(a) \
                        * self._shift(left, -tau / 2)[hop:hop + win] \
                        - sqrt(a) * self._shift(right, tau / 2)[hop:hop + win]
        if self.overlap:
            # Compensate for the overlap-add accumulation.
            out *= self.overlap
        return out
def _shift(self, x, delay):
"""Shift signal according to a delay and pads with zeros.
Parameters
----------
x : array
Signal.
delay : int
Delay in seconds. Positive values correspond to a delay in time,
i.e. the signal "starts later". Negative values correspond to a
signal starting "earlier".
Returns
-------
out : ndarray
Delayed signal
"""
n = x.shape[-1]
y = rfft(x)
w = rfftfreq(n, 1 / self.fs) * 2 * np.pi
y *= np.exp(-1j * w * delay)
return np.real(irfft(y, n))
def jitter(self, x, out=None):
"""Applies amplitude and time jitter to a signal.
Parameters
----------
x : array_like
Input signal, will be casted to 'float'. It can be one or 2
dimensional.
out : None or array_like
Define where to write the jitter signal. Defaults to `None`,
i.e. creates a new array. Can be used to jitter an array "in
place".
Returns
-------
out : ndarray
Jittered signal.
Notes
-----
The amplitude jitters are taken from a normal Gaussian distribution
with a mean of zero and a standard distribution of ``sigma_e``. The time
jitters are taken from a normal Gaussian distribution with mean zero
and standard distribution ``sigma_d`` in seconds. The default jitter
values come from [durlach1963]_.
References
----------
.. [durlach1963] Durlach, N. I. (1963). "Equalization and
Cancellation Theory of Binaural Masking-Level Differences", J. Acoust.
Soc. Am., 35(), 1206--1218
"""
x = np.asanyarray(x, dtype='float')
epsilons, deltas = self.create_jitter(x)
out = self.apply_jitter(x, epsilons, deltas, out=out)
return out
    def create_jitter(self, x):
        """Create amplitude and time jitter for a signal.

        Parameters
        ----------
        x : ndarray
            Input signal (1D or 2D); only its shape is used.

        Returns
        -------
        a_jitter : ndarray of floats
            Amplitude jitters, same shape as `x` (zero-mean Gaussian with
            std ``sigma_e``).
        t_jitter : ndarray of floats
            Time jitters in samples (zero-mean Gaussian with std
            ``sigma_d * fs``); not yet rounded to integer indices.
        """
        n_x = x.shape[-1]
        # Amplitude jitter
        a_jitter = self.sigma_e * np.random.randn(*x.shape)
        # Time jitter. `idx` is only used for its shape, so the time jitter
        # matches the layout of the sample indices.
        if x.ndim > 1:
            idx = np.tile(np.arange(n_x, dtype='float'), (x.shape[0], 1))
        else:
            idx = np.arange(n_x, dtype='float')
        t_jitter = self.sigma_d * self.fs * np.random.randn(*idx.shape)
        return a_jitter, t_jitter
@staticmethod
def apply_jitter(x, epsilons, deltas, out=None):
"""Apply jitter to a signal
Parameters
----------
x : ndarray
Input signal.
epsilons : ndarray of floats
Amplitude jitter coefficients.
deltas : ndarray of ints
Time jitters, they have to be integers because they will be
used as indices.
out : array or None
Array where to write the output. If None, which is the default,
the function returns a new array.
Returns
-------
out : ndarray
Jittered signal.
"""
n_cf = x.shape[0]
n_x = x.shape[-1]
if x.ndim > 1:
chan_idx = np.tile(np.arange(n_cf)[np.newaxis].T, (1, n_x))
idx = np.tile(np.arange(n_x, dtype='float'), (x.shape[0], 1))
else:
# Single channel
chan_idx = Ellipsis
idx = np.arange(n_x, dtype='float')
# Apply the jitter to the idx.
idx += deltas
# Limit the indices to the length of the array
idx = np.clip(idx, 0, n_x - 1, out=idx)
idx = np.round(idx, out=idx).astype('int')
# Create indices for channels, it's a n_cf x n_x array, where each row
# is filled with the row number.
# Same for the "ear" dimension
# ear_idx = np.tile(np.arange(2)[np.newaxis].T, (n_cf, 1, n_x))
if out is None:
out = x * (1 - epsilons)
out[..., :] = out[chan_idx, idx]
else:
x *= (1 - epsilons)
x[..., :] = x[chan_idx, idx]
out[...] = x
return out
|
achabotl/pambox
|
pambox/central/ec.py
|
Python
|
bsd-3-clause
| 17,944
|
[
"Gaussian"
] |
a914bac3128c4fc723332d99c65201bc0ea6f80a5a0f3c500240366a17a21fc3
|
#!/usr/bin/env python
#20-11-2017
#Authors:Sebastian ECHEVERRI RESTREPO,
# sebastian.echeverri.restrepo@skf.com, sebastianecheverrir@gmail.com
# James EWEN
# j.ewen14@imperial.ac.uk, jimmyewen@gmail.com
#################################################################################3
#Appart from generating the Fe and the O atoms for the surfaces, the bonds between
# them also have to be defined.
# It seems that it is easier to let lammps do this at the begining of a
# simulation.
#This script modifies the input files for lammps in order to define all the
# variables that er needed for the geneartion of the bonds
#################################################################################3
import math
import fileinput
def AddFe2O3(name):
#name = 'lopls'
#Reading the number of bond types
f = open(name+'.data','r')
lines = f.readlines()
for line in lines:
if line.find("bond types") != -1:
line=line.split()
nBondTypes = int(line[0])
f.close()
#modifying (rewriting) the file in.lopls
f = open('in.'+name,'wr+')
f.write("#-------------- Initialization Section -------------------- \n")
f.write("include "+name+".in.CreateBonds \n")
f.write("include "+name+".in.init \n")
f.write("include "+name+".in.settings \n")
f.write("include "+name+".in.charges \n\n")
f.write("dump dump1 all atom 1000 lopls.dump \n")
f.write("thermo_style custom step lx ly lz density temp press etotal\n")
f.write("thermo 1 \n")
f.write("write_data loplsInitial.data \n\n")
f.write("#--------------- Run Equilibriation -------------------- \n")
f.write("min_style cg \n")
f.write("minimize 0.0 0.0 100000 100000 \n\n")
f.close()
#generating a file to make the dummy start that lammps requires to generate bonds
# see http://lammps.sandia.gov/threads/msg54748.html
f = open(name+'.in.CreateBonds','wr+')
f.write("#dummy start needed to create bonds in lammps \n")
f.write("units metal \n")
f.write("atom_style full \n")
f.write("read_data "+name+".data extra/bond/types 6 \n")
f.write("pair_style lj/cut 10.0 \n")
f.write("pair_coeff * * 1.0 1.0 \n")
f.write("bond_style harmonic \n")
f.write("bond_coeff * 1.0 1.0 \n\n")
f.write("# Creating groups \n")
f.write("group fe type 30 \n")
f.write("group ox type 31 \n\n")
f.write("#Ceating bonds\n")
f.write("create_bonds fe ox "+str(nBondTypes+1)+" 1.900 2.000 \n")
f.write("create_bonds fe ox "+str(nBondTypes+2)+" 2.000 2.500 \n")
f.write("create_bonds ox ox "+str(nBondTypes+3)+" 2.800 2.900 \n")
f.write("create_bonds ox ox "+str(nBondTypes+4)+" 2.700 2.799 \n")
f.write("create_bonds ox ox "+str(nBondTypes+5)+" 2.600 2.699 \n")
f.write("create_bonds fe fe "+str(nBondTypes+6)+" 2.900 3.000 \n")
f.close()
#modifying (rewriting) the file lopls.in.init
f = open(name+".in.init",'wr+')
f.write("bond_style hybrid harmonic \n")
f.write("angle_style hybrid harmonic \n")
f.write("dihedral_style hybrid opls multi/harmonic \n")
f.write("improper_style hybrid harmonic \n")
f.write("pair_style hybrid lj/cut/coul/long 10.0 10.0 \n")
f.write("pair_modify mix geometric \n")
f.write("special_bonds lj/coul 0.0 0.0 0.5 \n")
f.write("kspace_style pppm 0.00001 \n")
f.close()
#modifying (rewriting) the file lopls.in.settings
replaced = False
for line in fileinput.input(name+".in.settings", inplace=1):
if line.startswith(' angle_coeff') and replaced is False:
print " bond_coeff "+str(nBondTypes+1)+" harmonic 5.63738 1.945000"
print " bond_coeff "+str(nBondTypes+2)+" harmonic 5.63738 2.116000"
print " bond_coeff "+str(nBondTypes+3)+" harmonic 5.63738 2.888000"
print " bond_coeff "+str(nBondTypes+4)+" harmonic 5.63738 2.775000"
print " bond_coeff "+str(nBondTypes+5)+" harmonic 5.63738 2.669000"
print " bond_coeff "+str(nBondTypes+6)+" harmonic 5.63738 2.971000"
replaced = True
print line,
#modifying (rewriting) the file lopls.data
# Adding the line "15 extra bond per atom" to allow more bonds per atom
for line in fileinput.input(name+".data", inplace=1):
print line,
if line.endswith('improper types\n'):
print "\t15 extra bond per atom"
|
JE1314/LAMMPS_builder
|
root/AddFe2O3.py
|
Python
|
gpl-3.0
| 4,567
|
[
"LAMMPS"
] |
dde6d0c124361da746d0691f1282eef3ced63c7ee3e7738a07d2d3cd02ab1aba
|
"""Configure file for PyMorph. Authors: Vinu Vikram, Yogesh Wadadekar and Ajit Kembhavi 2008"""
###----Specify the input images and Catalogues----###
imagefile = '00000002_r_stamp.fits'
whtfile = '00000002_r__r_W.fits' #The weight image. If it contains the
#string 'rms', this will treated as
#RMS_MAP and if it contains weight, then
#that will be treated as WEIGHT_MAP.
#If nothing found, then by default it
#is treated as MAP_RMS
sex_cata = 'sdss_sex.cat' #The sextractor catalogue which has
#the format given in the file
clus_cata = 'sdss_r.cat' #catalogue of galaxies from
#online catalogu service
#(name ra1 ra2 ra2 dec1 dec2 dec3)
datadir = '/home/vinu/scripts/pymorph-github/test_data/'
#the directory containing input images
#if commented out, then program uses
# current directory
###----Specify the output names of images and catalogues----###
out_cata = 'sdss_r_out.cat' #catalogue of galaxies in the field
rootname = 'r'
outdir = '/home/vinu/scripts/pymorph-github/test_data/'
#the directory containing output data
#if commented out, then program uses
# current directory
###----Psf list----###
psfselect = 0 #0 => No psfselection
#1 => Only Select psf
#2 => Select psf and run pipeline
#Recommended: Run with '1' and then run
#pipeline
starsize = 20 #psf image size will be startsize times
#the SMA given by SExtractor
#psflist = ['psf_1216382-1200443.fits', 'psf_1216408-1200251.fits', 'psf_1216424-1202057.fits','psf_1216487-1201246.fits','psf_1216504-1202104.fits']
psflist = '@psflist.list'
#List of psf containg their
#position information in the
#header (RA_TARG, DEC_TARG).
#Make psf with the names as here
#and use psf_header_update.py.
#It will update the header information.
mag_zero = 25.256 #magnitude zero point
###----Conditions for Masking----###
manual_mask = 0
mask_reg = 2.0
thresh_area = 0.2
threshold = 3.0 #Masking will be done for neighbours
#whose semimajor*threshold overlaps with
#threshold * semi-major axis of
#the object and area of the neighbour
#less than thresh_area * object area in
#sq.pixel.
#The masking will be for a circular
#region of radius mask_reg*semi-major
#axis of the nighbour with respect to
#the center of the neightbour.
###---Size of the cut out and search conditions---###
###---size = [resize?, varsize?, fracrad, square?, fixsize]---###
size = [0, 1, 9, 1, 120] #size of the stamp image
searchrad = '0.3arc' #The search radius
###----Parameters for calculating the physical parameters of galaxy----###
pixelscale = 0.39 #Pixel scale (arcsec/pixel)
H0 = 71 #Hubble parameter
WM = 0.27 #Omega matter
WV = 0.73 #Omega Lambda
###----Parameters to be set for calculating the CASGM----###
back_extraction_radius = 15.0
#back_ini_xcntr = 32.0
#back_ini_ycntr = 22.0
angle = 180.0
###----Fitting modes----###
repeat = False #Repeat the pipeline manually
galcut = True #True if we provide cutouts
decompose = False
detail = False #Detailed fitting
galfit = True #Always keep this True as it is not functional yet!
cas = True
findandfit = 0
maglim = [22, 15] #if findandfit= 1, then maglim = [faint_mag, bright_mag]
stargal = 0.8 #Star-galaxy classification
crashhandler = 0
###---Galfit Controls---###
components = ['bulge', 'disk'] #The components to be fitted to the objec
devauc = False # set to False to fit sersic bulge, set to true to fit devacouler's bulge (n = 4)
###---fixing = [bulge_center, disk_center, sky]
fitting = [1, 1, 1] # = 0, Fix params at SExtractor value
###----Set the SExtractor and GALFIT path here----###
GALFIT_PATH ='/home/vinu/software/galfit/galfit'
SEX_PATH = '/home/vinu/software/sextractor-2.5.0/sex/bin/sex'
PYMORPH_PATH = '/home/vinu/scripts/pymorph-github/pymorph'
galfitv = '2.0.2'
###----The following conditions are used to classify fit goo/bad----###
chi2sq = 2.5 #< chi2sq
Goodness = 0.60 #> Goodness
center_deviation = 3.0 #< abs(center - fitted center)
center_constrain = 2.0 #Keep center within +/- center_constrain
###----Database Informations----###
database = 'blank'
table = 'DevExp'
usr = 'sn_user'
pword = 'Kepler:1630'
dbparams = ['Morphology:DevExp', 'ObsID:1:int']
|
vvinuv/pymorph
|
test_data/config.py
|
Python
|
gpl-2.0
| 5,699
|
[
"Galaxy"
] |
b72459e197f4254d6b3df3a1bfb51082789d167e40d498575313fa45ba0a2928
|
import pysam
import csv
import sys
import collections
class Variant(object):
    """A SNP present in both the original and the re-mapped (allele-swapped)
    SNP tables, with pileup-based depth recomputation.

    Parameters
    ----------
    original, new : dict
        Rows (csv.DictReader style, with CHROM/POS/REF/ALT keys) from the
        original and new SNP tables.
    refdict : dict
        Known-SNP reference dictionary. Kept for interface compatibility;
        not used in this unfiltered run (see the commented lines below).
    """

    def __init__(self, original, new, refdict):
        self.original = original
        self.new = new
        self.chrom = self.original['CHROM']
        self.position = int(self.original['POS'])
        self.coordinates = (self.chrom, self.position)
        self.original_ref = self.original['REF']
        self.new_ref = self.new['REF']
        self.original_alt = self.original['ALT']
        self.new_alt = self.new['ALT']
        # Needed to remove these since this is a run with no filtering, meaning we
        # don't know what is the real base
        # self.real_reference_base = refdict[self.coordinates].reference
        # self.real_alternate_base = refdict[self.coordinates].alternate
        self.cutoffs = cos  # cos is a global set by combine_SNPs before use

    def __repr__(self):
        return 'Variant(original={}, new={})'.format(self.original, self.new)

    @staticmethod
    def get_coverage_cutoffs(cutoff_csv):
        """Load the per-coverage read-count cutoffs from a CSV file.

        BUG FIX: the parameter was previously named ``csv``, shadowing the
        ``csv`` module and making ``csv.reader`` fail with AttributeError.

        Parameters
        ----------
        cutoff_csv : str
            Path to a two-column CSV of (coverage, cutoff) with a header.

        Returns
        -------
        dict
            Mapping of coverage (int) to read-count cutoff (int).
        """
        with open(cutoff_csv, 'r') as cf:
            cutoff_reader = csv.reader(cf, delimiter=',')
            next(cutoff_reader)  # skip header
            return {int(row[0]): int(row[1]) for row in cutoff_reader}

    def _pileup_allele_reads(self, sam, first_base, second_base):
        """Collect read names supporting two bases at this variant position.

        Scans the pileup column at ``self.position`` and gathers names of
        reads whose base (with quality >= 20) matches `first_base` or
        `second_base`.

        Returns
        -------
        tuple of (list, list) or None
            ``(first_list, second_list)`` of supporting read names, or None
            when the column shows more than two distinct bases (three-state
            SNP) or fails the coverage cutoffs.
        """
        start_pos = self.position - 1
        end_pos = self.position
        first_list = []
        second_list = []
        for pileupcolumn in sam.pileup(reference=self.chrom, start=start_pos, end=end_pos):
            if pileupcolumn.pos != start_pos:
                continue
            count = 0
            bases = set()
            for read in pileupcolumn.pileups:
                if read.alignment.overlap(start_pos, end_pos) == 1:
                    count += 1
                    bases.add(read.alignment.seq[read.qpos])
                    # Phred quality from the ASCII-encoded string.
                    quality = ord(read.alignment.qqual[read.qpos]) - 33
                    if read.alignment.seq[read.qpos] == first_base and quality >= 20:
                        first_list.append(read.alignment.qname)
                    elif read.alignment.seq[read.qpos] == second_base and quality >= 20:
                        second_list.append(read.alignment.qname)
            # Reject three-state SNPs.
            if len(bases) > 2:
                return None
            # Reject positions failing the coverage cutoffs.
            cutoff = self.cutoffs.get(count, 24)
            if not (count >= 15 and len(first_list) >= cutoff
                    and len(second_list) >= cutoff):
                return None
        return first_list, second_list

    def get_new_row(self, orig_bam, new_bam):
        """Recompute reference/alternate depths from both BAM files.

        Returns an output-row dict (CHROM/POS/REF/ALT/R_Depth/A_Depth) or
        None when the variant fails any filter. BUG FIX: the BAM handles
        are now always closed, even on the early-return paths (the original
        leaked them).
        """
        # Only keep positions whose alleles are exactly swapped between runs.
        if not (self.original_ref == self.new_alt and self.original_alt == self.new_ref):
            return None
        orig_sam = pysam.Samfile(orig_bam, 'rb')
        new_sam = pysam.Samfile(new_bam, 'rb')
        try:
            orig_reads = self._pileup_allele_reads(
                orig_sam, self.original_ref, self.original_alt)
            if orig_reads is None:
                return None
            # In the swapped BAM, reads matching the *new* alternate support
            # the real reference, and vice versa.
            new_reads = self._pileup_allele_reads(
                new_sam, self.new_alt, self.new_ref)
            if new_reads is None:
                return None
        finally:
            orig_sam.close()
            new_sam.close()
        orig_refList, orig_altList = orig_reads
        new_refList, new_altList = new_reads
        # Depths are counted over the union of read names from both BAMs.
        reference_depth = len(set(orig_refList + new_refList))
        alternate_depth = len(set(orig_altList + new_altList))
        return {'CHROM': self.chrom,
                'POS': self.position,
                'REF': self.original_ref,
                'ALT': self.original_alt,
                'R_Depth': reference_depth,
                'A_Depth': alternate_depth}
def combine_SNPs(orig_f, new_f, orig_bam, new_bam, ref_vcf, output_f, cutoff_table):
    """Combine SNP calls from the original and allele-swapped runs.

    Builds Variant objects for every coordinate present in both SNP tables,
    recomputes depths from the two BAM files, and writes the surviving
    variants to `output_f` as a tab-separated table.

    Parameters
    ----------
    orig_f, new_f : str
        Paths to the original and new tab-separated SNP tables.
    orig_bam, new_bam : str
        Paths to the corresponding BAM files.
    ref_vcf : str
        Known-SNPs VCF; parsed into a lookup dict (passed to Variant but
        unused in this unfiltered run).
    output_f : str
        Output path for the combined table.
    cutoff_table : str
        CSV of per-coverage read-count cutoffs.
    """
    def get_coverage_cutoffs(cutoff_table):
        # Mapping of coverage -> read-count cutoff, header skipped.
        with open(cutoff_table, 'rb') as cf:
            cutoff_reader = csv.reader(cf, delimiter=',')
            next(cutoff_reader)  # remove header
            cutoffs = {int(row[0]): int(row[1]) for row in cutoff_reader}
            return cutoffs
    # Variant.__init__ reads this global (see self.cutoffs = cos there).
    global cos
    cos = get_coverage_cutoffs(cutoff_table)
    with open(ref_vcf, 'rb') as ref_vcf:
        ref_reader = csv.reader(ref_vcf, delimiter='\t')
        def get_single_base_positions(reader):
            # Expand multi-base REF/ALT entries into per-base records.
            for row in reader:
                if len(row[3]) == 1 and len(row[4]) == 1:
                    yield [row[0], int(row[1]), row[3], row[4]]  # chrom, pos, ref, alt
                else:
                    assert len(row[3]) == len(row[4])
                    position = int(row[1])
                    for refbase, altbase in zip(row[3], row[4]):
                        yield [row[0], position, refbase, altbase]
                        position += 1
        Ref_tup = collections.namedtuple('Ref_tup', ['reference', 'alternate'])
        # Dictionary containing the coordinates and the ref and alt bases from the reference vcf,
        # the known SNPs file for filtering.
        ref_dict = {(row[0], row[1]): Ref_tup(row[2], row[3]) for row in
                    get_single_base_positions(ref_reader)}
    with open(orig_f, 'rb') as of, open(new_f, 'rb') as nf:
        fields = ('CHROM', 'POS', 'REF', 'ALT', 'RD', 'AD',
                  'gene_id', 'exon_number', 'gene_name')
        oreader = csv.DictReader(of, fields, delimiter='\t')
        nreader = csv.DictReader(nf, fields, delimiter='\t')
        # Index both tables by (chrom, pos) for the intersection below.
        orig_row_holder = {(row['CHROM'], row['POS']): row for row in oreader}
        new_row_holder = {(row['CHROM'], row['POS']): row for row in nreader}
    variants = []
    for coord in orig_row_holder:
        if coord in new_row_holder:
            v = Variant(original=orig_row_holder[coord],
                        new=new_row_holder[coord],
                        refdict=ref_dict)
            variants.append(v)
    with open(output_f, 'wb') as fout:
        fields = ('CHROM', 'POS', 'REF', 'ALT', 'R_Depth', 'A_Depth')
        writer = csv.DictWriter(fout, fields, delimiter='\t', lineterminator='\n')
        # Header row.
        writer.writerow({field: field for field in fields})
        for count, var in enumerate(variants, 1):
            if count % 10000 == 0:
                print 'rows examined:', count
            newrow = var.get_new_row(orig_bam, new_bam)
            if newrow:
                writer.writerow(newrow)
# orig_f = '/scratch/Drew/testdir/original/16_A12_pUn_down/16_A12_pUn_down_INTER_py.csv'
# new_f = '/scratch/Drew/testdir/alternate/16_A12_pUn_down/16_A12_pUn_down_INTER_py.csv'
# output_f = 'snpstest.csv'
# orig_bam = '/scratch/Drew/testdir/original/16_A12_pUn_down/16_A12_pUn_down_thout/filter.bam'
# alt_bam = '/scratch/Drew/testdir/alternate/16_A12_pUn_down/16_A12_pUn_down_thout/filter.bam'
# combine_SNPs(orig_f, new_f, orig_bam, alt_bam, output_f)
def quick_mean_propR(input_f):
    """Compute the mean reference-allele proportion over a SNP table.

    Parameters
    ----------
    input_f : str
        Path to a tab-delimited file with 'R_Depth' and 'A_Depth' columns
        (first line is the header).

    Returns
    -------
    tuple of (float, int)
        Mean of R_Depth / (R_Depth + A_Depth) over rows where both depths
        are non-zero, and the total number of data rows read. Returns
        (0.0, 0) for an empty table and a 0.0 mean when no row has both
        depths non-zero (the original code crashed on those inputs with
        NameError / ZeroDivisionError).
    """
    # Text mode: the csv module expects str rows (the original 'rb' breaks
    # under Python 3 and is equivalent on POSIX under Python 2).
    with open(input_f, 'r') as f:
        reader = csv.DictReader(f, delimiter='\t')
        propRs = []
        n_rows = 0
        for n_rows, row in enumerate(reader, 1):
            ref_depth = float(row['R_Depth'])
            alt_depth = float(row['A_Depth'])
            # Skip rows where either allele is unobserved; the proportion
            # is degenerate there.
            if ref_depth != 0 and alt_depth != 0:
                propRs.append(ref_depth / (ref_depth + alt_depth))
    if not propRs:
        return (0.0, n_rows)
    return (sum(propRs) / len(propRs), n_rows)
# quick_mean_propR('snps.vcf')
# i = 0
# printcount = 0
# while True and printcount < 2:
# row = variants[i].get_new_row(orig_bam, new_bam)
# if row:
# print variants[i]
# print row
# print i
# printcount += 1
# i += 1
# sys.exit(0)
|
d-quinn/bio_quinn2013
|
SNP_calling/no_filter/snps_combine.py
|
Python
|
mit
| 8,790
|
[
"pysam"
] |
7dc2d291ab7d8f72e878ae0182bcc1ff03fd36ac8b12d94f965ed6e55b6831f9
|
# minimal.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# Minimal model for loading rdesigneur: reac-diff elec signaling in neurons
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
    """Build the channel/synapse prototype library and load the cell model.

    Creates /library, populates it with the proto18 prototypes the cell
    file refers to, then loads 'mincell2.p' under /model/elec.

    Returns the Id of the loaded cell.
    """
    moose.Neutral( '/library' )
    moose.setCwe( '/library' )
    # Build every prototype in the same order as before; each maker adds
    # its element(s) under the current working element (/library).
    prototype_makers = (
        proto18.make_Ca,
        proto18.make_Ca_conc,
        proto18.make_K_AHP,
        proto18.make_K_C,
        proto18.make_Na,
        proto18.make_K_DR,
        proto18.make_K_A,
        proto18.make_glu,
        proto18.make_NMDA,
        proto18.make_Ca_NMDA,
        proto18.make_NMDA_Ca_conc,
        proto18.make_axon,
    )
    for make_proto in prototype_makers:
        make_proto()
    moose.setCwe( '/library' )
    moose.Neutral( '/model' )
    return moose.loadModel( 'mincell2.p', '/model/elec', "Neutral" )
def loadChem( diffLength ):
    """Create the chemical compartments (dend/spine/psd) and load minimal.g.

    Parameters
    ----------
    diffLength : float
        Diffusion length in metres. NOTE(review): currently unused here;
        the caller sets it on the NeuroMesh afterwards — confirm intended.
    """
    chem = moose.Neutral( '/model/chem' )
    neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
    neuroCompt.separateSpines = 1
    neuroCompt.geometryPolicy = 'cylinder'
    spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
    # The NeuroMesh feeds its spine list to the SpineMesh...
    moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
    psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
    #print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
    # ... and its PSD list to the PsdMesh.
    moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
    modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
    # Rename the compartments to the names the rest of the script expects.
    neuroCompt.name = 'dend'
    spineCompt.name = 'spine'
    psdCompt.name = 'psd'
def makeNeuroMeshModel():
    """Assemble the multiscale model.

    Loads the electrical cell and the chemical model, maps the chemical
    NeuroMesh onto the cell, installs Ksolve/Dsolve/Stoich solvers for the
    dend, spine and psd compartments, joins their diffusion solvers, and
    wires an Adaptor from the somatic Ca_conc to the dendritic chemical Ca
    pool. The asserts pin the expected mesh/pool counts for mincell2.p +
    minimal.g.
    """
    diffLength = 6e-6 # Aim for 2 soma compartments.
    elec = loadElec()
    loadChem( diffLength )
    neuroCompt = moose.element( '/model/chem/dend' )
    neuroCompt.diffLength = diffLength
    neuroCompt.cellPortion( elec, '/model/elec/#' )
    # Default diffusion constants: slow for all pools, faster for Ca.
    for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
        if (x.diffConst > 0):
            x.diffConst = 1e-11
    for x in moose.wildcardFind( '/model/chem/##/Ca' ):
        x.diffConst = 1e-10
    # Put in dend solvers
    ns = neuroCompt.numSegments
    ndc = neuroCompt.numDiffCompts
    print 'ns = ', ns, ', ndc = ', ndc
    assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
    assert( ns == 1 ) # soma/dend only
    assert( ndc == 2 ) # split into 2.
    nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
    nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
    nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
    nmstoich.compartment = neuroCompt
    nmstoich.ksolve = nmksolve
    nmstoich.dsolve = nmdsolve
    # Setting the path builds the stoichiometry matrix.
    nmstoich.path = "/model/chem/dend/##"
    print 'done setting path, numPools = ', nmdsolve.numPools
    assert( nmdsolve.numPools == 1 )
    assert( nmdsolve.numAllVoxels == 2 )
    assert( nmstoich.numAllPools == 1 )
    # oddly, numLocalFields does not work.
    ca = moose.element( '/model/chem/dend/DEND/Ca' )
    assert( ca.numData == ndc )
    # Put in spine solvers. Note that these get info from the neuroCompt
    spineCompt = moose.element( '/model/chem/spine' )
    sdc = spineCompt.mesh.num
    print 'sdc = ', sdc
    assert( sdc == 1 )
    smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
    smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
    smstoich = moose.Stoich( '/model/chem/spine/stoich' )
    smstoich.compartment = spineCompt
    smstoich.ksolve = smksolve
    smstoich.dsolve = smdsolve
    smstoich.path = "/model/chem/spine/##"
    assert( smstoich.numAllPools == 3 )
    assert( smdsolve.numPools == 3 )
    assert( smdsolve.numAllVoxels == 1 )
    # Put in PSD solvers. Note that these get info from the neuroCompt
    psdCompt = moose.element( '/model/chem/psd' )
    pdc = psdCompt.mesh.num
    assert( pdc == 1 )
    pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
    pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
    pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
    pmstoich.compartment = psdCompt
    pmstoich.ksolve = pmksolve
    pmstoich.dsolve = pmdsolve
    pmstoich.path = "/model/chem/psd/##"
    assert( pmstoich.numAllPools == 3 )
    assert( pmdsolve.numPools == 3 )
    assert( pmdsolve.numAllVoxels == 1 )
    foo = moose.element( '/model/chem/psd/Ca' )
    print 'PSD: numfoo = ', foo.numData
    print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
    # Put in junctions between the diffusion solvers
    nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
    """
    CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
    print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
    CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
    print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
    """
    # set up adaptors: electrical Ca_conc -> chemical dend Ca pool
    aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', ndc )
    adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
    chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
    print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
    assert( len( adaptCa ) == ndc )
    assert( len( chemCa ) == ndc )
    path = '/model/elec/soma/Ca_conc'
    elecCa = moose.element( path )
    print "=========="
    print elecCa
    print adaptCa
    print chemCa
    moose.connect( elecCa, 'concOut', adaptCa[0], 'input', 'Single' )
    moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
    adaptCa.inputOffset = 0.0    #
    adaptCa.outputOffset = 0.00008    # 80 nM offset in chem.
    adaptCa.scale = 1e-3    # 520 to 0.0052 mM
    #print adaptCa.outputOffset
    #print adaptCa.scale
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeElecPlots():
    """Set up tables recording soma/spine voltages and somatic Ca."""
    moose.Neutral( '/graphs' )
    moose.Neutral( '/graphs/elec' )
    plot_specs = (
        ( '/model/elec/soma', 'getVm', 'elec/somaVm' ),
        ( '/model/elec/spine_head', 'getVm', 'elec/spineVm' ),
        ( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' ),
    )
    for objpath, field, plot in plot_specs:
        addPlot( objpath, field, plot )
def makeChemPlots():
    """Set up tables recording chemical Ca and Ca_CaM concentrations."""
    moose.Neutral( '/graphs' )
    moose.Neutral( '/graphs/chem' )
    plot_specs = (
        ( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' ),
        ( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' ),
        ( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' ),
        ( '/model/chem/spine/Ca', 'getConc', 'chem/spineCa' ),
        ( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' ),
    )
    for objpath, field, plot in plot_specs:
        addPlot( objpath, field, plot )
def testNeuroMeshMultiscale():
elecDt = 50e-6
chemDt = 0.01
ePlotDt = 0.5e-3
cPlotDt = 0.01
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
"""
for i in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if ( i[0].diffConst > 0 ):
grandpaname = i.parent[0].parent.name + '/'
paname = i.parent[0].name + '/'
print grandpaname + paname + i[0].name, i[0].diffConst
print 'Neighbors:'
for t in moose.element( '/model/chem/spine/ksolve/junction' ).neighbors['masterJunction']:
print 'masterJunction <-', t.path
for t in moose.wildcardFind( '/model/chem/#/ksolve' ):
k = moose.element( t[0] )
print k.path + ' localVoxels=', k.numLocalVoxels, ', allVoxels= ', k.numAllVoxels
"""
'''
moose.useClock( 4, '/model/chem/dend/dsolve', 'process' )
moose.useClock( 5, '/model/chem/dend/ksolve', 'process' )
moose.useClock( 5, '/model/chem/spine/ksolve', 'process' )
moose.useClock( 5, '/model/chem/psd/ksolve', 'process' )
'''
makeChemPlots()
makeElecPlots()
moose.setClock( 0, elecDt )
moose.setClock( 1, elecDt )
moose.setClock( 2, elecDt )
moose.setClock( 4, chemDt )
moose.setClock( 5, chemDt )
moose.setClock( 6, chemDt )
moose.setClock( 7, cPlotDt )
moose.setClock( 8, ePlotDt )
moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
#moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )
moose.useClock( 4, '/model/chem/#/dsolve', 'process' )
moose.useClock( 5, '/model/chem/#/ksolve', 'process' )
moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )
moose.useClock( 7, '/graphs/chem/#', 'process' )
moose.useClock( 8, '/graphs/elec/#', 'process' )
#hsolve = moose.HSolve( '/model/elec/hsolve' )
#moose.useClock( 1, '/model/elec/hsolve', 'process' )
#hsolve.dt = elecDt
#hsolve.target = '/model/elec/compt'
#moose.reinit()
moose.element( '/model/elec/spine_head' ).inject = 5e-12
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
"""
print 'pre'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'dend'
eca = moose.vec( '/model/chem/dend/DEND/Ca' )
#for i in ( 0, 1, 2, 30, 60, 90, 120, 144 ):
for i in range( 13 ):
print i, eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'PSD'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'spine'
eca = moose.vec( '/model/chem/spine/SPINE/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
"""
moose.start( 0.5 )
plt.ion()
fig = plt.figure( figsize=(8,8) )
chem = fig.add_subplot( 211 )
chem.set_ylim( 0, 0.004 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = chem.plot( pos, x.vector, label=x.name )
plt.legend()
elec = fig.add_subplot( 212 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = elec.plot( pos, x.vector, label=x.name )
plt.legend()
fig.canvas.draw()
raw_input()
'''
for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
t = numpy.arange( 0, x.vector.size, 1 )
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
'''
pylab.show()
print 'All done'
def main():
    """Entry point: build and run the multiscale test model defined above."""
    testNeuroMeshMultiscale()
# Run the demo when executed directly as a script.
if __name__ == '__main__':
    main()
#
# minimal.py ends here.
|
dilawar/moose-full
|
moose-examples/snippets/MULTI/minchan.py
|
Python
|
gpl-2.0
| 12,176
|
[
"MOOSE"
] |
75d899272449ed9e587207b896ecb0d3befb78c3eff608d5fba18a9ae71e6265
|
"""
Tests i18n in courseware
"""
import re
from nose.plugins.attrib import attr
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse, NoReverseMatch
from django.test import TestCase
from django.test.client import Client
from django.utils import translation
from dark_lang.models import DarkLangConfig
from lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from student.tests.factories import UserFactory, RegistrationFactory, UserProfileFactory
class BaseI18nTestCase(TestCase):
    """
    Shared helpers for the i18n test cases below.
    """
    def setUp(self):
        super(BaseI18nTestCase, self).setUp()
        # Always restore the default translation when a test finishes.
        self.addCleanup(translation.deactivate)

    def assert_tag_has_attr(self, content, tag, attname, value):
        """Assert that a tag in `content` has a certain value in a certain attribute."""
        pattern = r"""<{tag} [^>]*\b{attname}=['"]([\w\d\- ]+)['"][^>]*>""".format(tag=tag, attname=attname)
        found = re.search(pattern, content)
        self.assertTrue(found, "Couldn't find desired tag '%s' with attr '%s' in %r" % (tag, attname, content))
        # The attribute may hold several whitespace-separated values.
        self.assertIn(value, found.group(1).split())

    def release_languages(self, languages):
        """
        Release a set of languages using the dark lang interface.
        languages is a string of comma-separated lang codes, eg, 'ar, es-419'
        """
        changer = User()
        changer.save()
        config = DarkLangConfig(
            released_languages=languages,
            changed_by=changer,
            enabled=True,
        )
        config.save()
@attr('shard_1')
class I18nTestCase(BaseI18nTestCase):
    """
    Tests for i18n
    """
    def assert_lang(self, response, code):
        """Assert `response` is served in language `code`: html attr, header, body class."""
        self.assert_tag_has_attr(response.content, "html", "lang", code)
        self.assertEqual(response['Content-Language'], code)
        self.assert_tag_has_attr(response.content, "body", "class", "lang_" + code)

    def test_default_is_en(self):
        self.release_languages('fr')
        self.assert_lang(self.client.get('/'), 'en')

    def test_esperanto(self):
        self.release_languages('fr, eo')
        self.assert_lang(self.client.get('/', HTTP_ACCEPT_LANGUAGE='eo'), 'eo')

    def test_switching_languages_bidi(self):
        self.release_languages('ar, eo')
        english_page = self.client.get('/')
        self.assert_lang(english_page, 'en')
        self.assert_tag_has_attr(english_page.content, "body", "class", "ltr")
        arabic_page = self.client.get('/', HTTP_ACCEPT_LANGUAGE='ar')
        self.assert_lang(arabic_page, 'ar')
        self.assert_tag_has_attr(arabic_page.content, "body", "class", "rtl")
@attr('shard_1')
class I18nRegressionTests(BaseI18nTestCase):
    """
    Tests for i18n
    """
    def assert_page_lang(self, response, code):
        """Assert the <html> tag of `response` declares language `code`."""
        self.assert_tag_has_attr(response.content, "html", "lang", code)

    def test_es419_acceptance(self):
        # Regression test; LOC-72, and an issue with Django
        self.release_languages('es-419')
        resp = self.client.get('/', HTTP_ACCEPT_LANGUAGE='es-419')
        self.assert_page_lang(resp, 'es-419')

    def test_unreleased_lang_resolution(self):
        # Regression test; LOC-85
        self.release_languages('fa')
        # 'fa' is released but 'fa-ir' stays dark. An Accept-Language request
        # for 'fa-ir' (without the ?preview-lang query param) should fall back
        # to the released 'fa' files.
        resp = self.client.get('/', HTTP_ACCEPT_LANGUAGE='fa-ir')
        self.assert_page_lang(resp, 'fa')
        # The dark-lang preview param, however, serves 'fa-ir' directly.
        resp = self.client.get('/?preview-lang=fa-ir')
        self.assert_page_lang(resp, 'fa-ir')

    def test_preview_lang(self):
        # Regression test; LOC-87
        self.release_languages('es-419')
        site_lang = settings.LANGUAGE_CODE
        # Front page shows the site default language.
        self.assert_page_lang(self.client.get('/'), site_lang)
        # preview-lang can switch to an unreleased language...
        self.assert_page_lang(self.client.get('/?preview-lang=eo'), 'eo')
        # ...and to a released one.
        self.assert_page_lang(self.client.get('/?preview-lang=es-419'), 'es-419')
        # Clearing the preview restores the site default.
        self.assert_page_lang(self.client.get('/?clear-lang'), site_lang)
@attr('shard_1')
class I18nLangPrefTests(BaseI18nTestCase):
    """
    Regression tests of language presented to the user, when they
    choose a language preference, and when they have a preference
    and use the dark lang preview functionality.
    """
    def setUp(self):
        """Create and log in a user; stash the dashboard url and site language."""
        super(I18nLangPrefTests, self).setUp()
        # Create one user and save it to the database
        email = 'test@edx.org'
        pwd = 'test_password'
        self.user = UserFactory.build(username='test', email=email)
        self.user.set_password(pwd)
        self.user.save()
        # Create a registration for the user
        RegistrationFactory(user=self.user)
        # Create a profile for the user
        UserProfileFactory(user=self.user)
        # Create the test client
        self.client = Client()
        # Get the login url & log in our user
        # NOTE(review): some url configurations appear to name the endpoint
        # 'login_post' while others only have 'login' -- hence the fallback.
        try:
            login_url = reverse('login_post')
        except NoReverseMatch:
            login_url = reverse('login')
        self.client.post(login_url, {'email': email, 'password': pwd})
        # Url and site lang vars for tests to use
        self.url = reverse('dashboard')
        self.site_lang = settings.LANGUAGE_CODE
    def test_lang_preference(self):
        """A saved language preference changes the language pages are served in."""
        # Regression test; LOC-87
        self.release_languages('ar, es-419')
        # Visit the front page; verify we see site default lang
        response = self.client.get(self.url)
        self.assert_tag_has_attr(response.content, "html", "lang", self.site_lang)
        # Set user language preference
        set_user_preference(self.user, LANGUAGE_KEY, 'ar')
        # and verify we now get an ar response
        response = self.client.get(self.url)
        self.assert_tag_has_attr(response.content, "html", "lang", 'ar')
        # Verify that switching language preference gives the right language
        set_user_preference(self.user, LANGUAGE_KEY, 'es-419')
        response = self.client.get(self.url)
        self.assert_tag_has_attr(response.content, "html", "lang", 'es-419')
    def test_preview_precedence(self):
        """The dark-lang preview overrides a saved language preference."""
        # Regression test; LOC-87
        self.release_languages('ar, es-419')
        # Set user language preference
        set_user_preference(self.user, LANGUAGE_KEY, 'ar')
        # Verify preview-lang takes precedence
        response = self.client.get('{}?preview-lang=eo'.format(self.url))
        self.assert_tag_has_attr(response.content, "html", "lang", 'eo')
        # Hitting another page should keep the dark language set.
        response = self.client.get(reverse('courses'))
        self.assert_tag_has_attr(response.content, "html", "lang", "eo")
        # Clearing language must set language back to preference language
        response = self.client.get('{}?clear-lang'.format(self.url))
        self.assert_tag_has_attr(response.content, "html", "lang", 'ar')
|
Learningtribes/edx-platform
|
lms/djangoapps/courseware/tests/test_i18n.py
|
Python
|
agpl-3.0
| 8,118
|
[
"VisIt"
] |
15127ccc304ac67a8b482547b1005506ac48166d9d48b9029c3429e45bea7057
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
from six import BytesIO
from tempfile import NamedTemporaryFile, mkdtemp
from os.path import exists, join
from unittest import TestCase, main
from shutil import rmtree
from uuid import uuid4
from skbio.util import (cardinal_to_ordinal, safe_md5, remove_files,
create_dir, find_duplicates, flatten,
is_casava_v180_or_later)
from skbio.util._misc import _handle_error_codes, MiniRegistry, reprnator
class TestMiniRegistry(TestCase):
    """Tests for the MiniRegistry decorator-based registration helper."""
    def setUp(self):
        self.registry = MiniRegistry()
    def test_decoration(self):
        """Decorating a function registers it under the given name."""
        self.assertNotIn("name1", self.registry)
        self.assertNotIn("name2", self.registry)
        self.n1_called = False
        self.n2_called = False
        @self.registry("name1")
        def some_registration1():
            self.n1_called = True
        @self.registry("name2")
        def some_registration2():
            self.n2_called = True
        self.assertIn("name1", self.registry)
        self.assertEqual(some_registration1, self.registry["name1"])
        self.assertIn("name2", self.registry)
        self.assertEqual(some_registration2, self.registry["name2"])
        # Calling through the registry must invoke the original functions.
        self.registry["name1"]()
        self.assertTrue(self.n1_called)
        self.registry["name2"]()
        self.assertTrue(self.n2_called)
    def test_copy(self):
        """Copies are independent: new registrations don't leak either way."""
        @self.registry("name")
        def some_registration():
            pass
        new = self.registry.copy()
        self.assertIsNot(new, self.registry)
        @new("other")
        def other_registration():
            pass
        self.assertIn("name", self.registry)
        self.assertNotIn("other", self.registry)
        self.assertIn("other", new)
        self.assertIn("name", new)
    def test_everything(self):
        # NOTE: the docstrings of the fixture methods below are test data for
        # interpolate(); the expected strings are whitespace-sensitive, so do
        # not reformat them.
        class SomethingToInterpolate(object):
            def interpolate_me():
                """First line
                Some description of things, also this:
                Other things are happening now.
                """
            def dont_interpolate_me():
                """First line
                Some description of things, also this:
                Other things are happening now.
                """
        class Subclass(SomethingToInterpolate):
            pass
        @self.registry("a")
        def a():
            """x"""
        @self.registry("b")
        def b():
            """y"""
        @self.registry("c")
        def c():
            """z"""
        subclass_registry = self.registry.copy()
        @subclass_registry("o")
        def o():
            """p"""
        # interpolate() rewrites the named method's docstring to list the
        # registered entries; dont_interpolate_me must remain untouched.
        self.registry.interpolate(SomethingToInterpolate, "interpolate_me")
        subclass_registry.interpolate(Subclass, "interpolate_me")
        self.assertEqual(SomethingToInterpolate.interpolate_me.__doc__,
                         "First line\n\n    Some description of th"
                         "ings, also this:\n\n\t'a'\n\t x\n\t'b'\n\t y\n\t'c"
                         "'\n\t z\n\n    Other things are happeni"
                         "ng now.\n    ")
        self.assertEqual(SomethingToInterpolate.dont_interpolate_me.__doc__,
                         "First line\n\n    Some description of th"
                         "ings, also this:\n\n    Other things are"
                         " happening now.\n    ")
        self.assertEqual(Subclass.interpolate_me.__doc__,
                         "First line\n\n    Some description of th"
                         "ings, also this:\n\n\t'a'\n\t x\n\t'b'\n\t y\n\t'c"
                         "'\n\t z\n\t'o'\n\t p\n\n    Other thin"
                         "gs are happening now.\n    ")
        self.assertEqual(Subclass.dont_interpolate_me.__doc__,
                         "First line\n\n    Some description of th"
                         "ings, also this:\n\n    Other things are"
                         " happening now.\n    ")
class MiscTests(TestCase):
    """Tests for assorted skbio.util helpers."""
    def setUp(self):
        # Directories created during a test; removed in tearDown.
        self.dirs_to_remove = []
    def tearDown(self):
        for element in self.dirs_to_remove:
            rmtree(element)
    def test_is_casava_v180_or_later(self):
        """Sniffing of CASAVA >= 1.8.0 sequence-id headers."""
        self.assertFalse(is_casava_v180_or_later(b'@foo'))
        id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
        self.assertTrue(is_casava_v180_or_later(id_))
        # Ids not starting with '@' are rejected outright.
        with self.assertRaises(ValueError):
            is_casava_v180_or_later(b'foo')
    def test_safe_md5(self):
        """safe_md5 hashes a file-like object's contents."""
        exp = 'ab07acbb1e496801937adfa772424bf7'
        fd = BytesIO(b'foo bar baz')
        obs = safe_md5(fd)
        self.assertEqual(obs.hexdigest(), exp)
        fd.close()
    def test_remove_files(self):
        # create list of temp file paths
        test_fds = [NamedTemporaryFile(delete=False) for i in range(5)]
        test_filepaths = [element.name for element in test_fds]
        # should work just fine
        remove_files(test_filepaths)
        # check that an error is raised on trying to remove the files...
        self.assertRaises(OSError, remove_files, test_filepaths)
        # touch one of the filepaths so it exists
        extra_file = NamedTemporaryFile(delete=False).name
        test_filepaths.append(extra_file)
        # no error is raised on trying to remove the files
        # (although 5 don't exist)...
        remove_files(test_filepaths, error_on_missing=False)
        # ... and the existing file was removed
        self.assertFalse(exists(extra_file))
        # try to remove them with remove_files and verify that an OSError is
        # raised (the comment previously said IOError; the assertion checks
        # OSError)
        self.assertRaises(OSError, remove_files, test_filepaths)
        # now get no error when error_on_missing=False
        remove_files(test_filepaths, error_on_missing=False)
    def test_create_dir(self):
        # create a directory
        tmp_dir_path = mkdtemp()
        # create a random temporary directory name
        tmp_dir_path2 = join(mkdtemp(), str(uuid4()))
        tmp_dir_path3 = join(mkdtemp(), str(uuid4()))
        self.dirs_to_remove += [tmp_dir_path, tmp_dir_path2, tmp_dir_path3]
        # create on existing dir raises OSError if fail_on_exist=True
        self.assertRaises(OSError, create_dir, tmp_dir_path,
                          fail_on_exist=True)
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=True,
                                    handle_errors_externally=True), 1)
        # return should be 1 if dir exist and fail_on_exist=False
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=False), 1)
        # if dir not there make it and return always 0
        self.assertEqual(create_dir(tmp_dir_path2), 0)
        self.assertEqual(create_dir(tmp_dir_path3, fail_on_exist=True), 0)
    def test_handle_error_codes_no_error(self):
        # 0 means "no error" for the internal error-code helper.
        obs = _handle_error_codes('/foo/bar/baz')
        self.assertEqual(obs, 0)
    def test_flatten(self):
        # flatten removes exactly one level of nesting.
        self.assertEqual(flatten(['aa', 'bb', 'cc']), list('aabbcc'))
        self.assertEqual(flatten([1, [2, 3], [[4, [5]]]]), [1, 2, 3, [4, [5]]])
class CardinalToOrdinalTests(TestCase):
    """Tests for cardinal_to_ordinal."""
    def test_valid_range(self):
        # Expected ordinals taken and modified from
        # http://stackoverflow.com/a/20007730/3776794
        expected = [
            '0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th',
            '9th', '10th', '11th', '12th', '13th', '14th', '15th', '16th',
            '17th', '18th', '19th', '20th', '21st', '22nd', '23rd', '24th',
            '25th', '26th', '27th', '28th', '29th', '30th', '31st', '32nd',
            '100th', '101st', '42042nd']
        cardinals = list(range(0, 33)) + [100, 101, 42042]
        observed = [cardinal_to_ordinal(cardinal) for cardinal in cardinals]
        self.assertEqual(observed, expected)
    def test_invalid_n(self):
        # Negative input is rejected; the error message mentions the value.
        with self.assertRaisesRegexp(ValueError, '-1'):
            cardinal_to_ordinal(-1)
class TestReprnator(TestCase):
    """Tests for the reprnator repr-building helper."""
    def test_no_tokens(self):
        self.assertEqual(reprnator("$START$", [], "#END#"), "$START$#END#")
    def test_one_line(self):
        self.assertEqual(reprnator("$START$", ["bill"], "#END#"),
                         "$START$bill#END#")
        self.assertEqual(reprnator("$START$", ["bill", "bob"], "#END#"),
                         "$START$bill, bob#END#")
    def test_overflow(self):
        # Long token streams must wrap; the expected strings encode the exact
        # line-break and indentation behaviour, so do not reformat them.
        tokens = [
            "ABCDEF",
            "HIGJKL",
            "MNOPQR",
            "STUVWX",
            "YZ",
        ]
        self.assertEqual(reprnator("$START$", tokens * 4, "#END#"),
                         '$START$ABCDEF, HIGJKL, MNOPQR, STUVWX, YZ, ABCDEF, H'
                         'IGJKL, MNOPQR, STUVWX, YZ, \n    ABCDEF, HIGJKL, '
                         'MNOPQR, STUVWX, YZ, ABCDEF, HIGJKL, MNOPQR, STUVWX, '
                         'YZ\n    #END#')
        self.assertEqual(reprnator("$START$", tokens * 3, "#END#"),
                         '$START$ABCDEF, HIGJKL, MNOPQR, STUVWX, YZ, ABCDEF, H'
                         'IGJKL, MNOPQR, STUVWX, YZ, \n    ABCDEF, HIGJKL, '
                         'MNOPQR, STUVWX, YZ#END#')
    def test_seperator(self):
        # (Method name typo "seperator" kept: renaming would change the
        # collected test identity.)
        self.assertEqual(reprnator("", list("abc"), "", separator='|'),
                         "a|b|c")
class TestFindDuplicates(TestCase):
    """Tests for skbio.util.find_duplicates."""
    def test_empty_input(self):
        def empty_gen():
            # End the generator with a plain ``return`` instead of
            # ``raise StopIteration()``: under PEP 479 (default from Python
            # 3.7) raising StopIteration inside a generator is converted to
            # RuntimeError, which would crash this fixture. ``return``
            # behaves identically on all versions.
            return
            yield
        # Every flavour of empty collection yields an empty duplicate set.
        for empty in [], (), '', set(), {}, empty_gen():
            self.assertEqual(find_duplicates(empty), set())
    def test_no_duplicates(self):
        # Case-sensitive: 'a' and 'A' are distinct.
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'A']), set())
    def test_one_duplicate(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'def', 'a']), set(['a']))
    def test_many_duplicates(self):
        self.assertEqual(find_duplicates(['a', 'bc', 'bc', 'def', 'a']),
                         set(['a', 'bc']))
    def test_all_duplicates(self):
        # Tuples are accepted, and items repeated more than twice are
        # reported only once.
        self.assertEqual(
            find_duplicates(('a', 'bc', 'bc', 'def', 'a', 'def', 'def')),
            set(['a', 'bc', 'def']))
    def test_mixed_types(self):
        # Works on an arbitrary (single-pass) iterable of mixed types.
        def gen():
            for e in 'a', 1, 'bc', 2, 'a', 2, 2, 3.0:
                yield e
        self.assertEqual(find_duplicates(gen()), set(['a', 2]))
# Run this module's tests directly with the stdlib unittest runner.
if __name__ == '__main__':
    main()
|
jensreeder/scikit-bio
|
skbio/util/tests/test_misc.py
|
Python
|
bsd-3-clause
| 10,757
|
[
"scikit-bio"
] |
90f730608ef2b847d609ec5fb2a1fe5fc720c7ac828eb02990f7a08b9389d8d4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import abc
import re
import os
import glob
import shutil
import warnings
from itertools import chain
from copy import deepcopy
import six
import numpy as np
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.sites import PeriodicSite
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
Improper overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings and user_kpoints_settings are absolute. Any new sets you
implement must obey this. If a user wants to override your settings,
you assume he knows what he is doing. Do not magically override user
supplied settings. You can issue a warning if you think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
__author__ = "Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class VaspInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
    """
    Base class representing a set of Vasp input parameters with a structure
    supplied as init parameters. Typically, you should not inherit from this
    class. Start from DictSet or MPRelaxSet or MITRelaxSet.
    """
    @abc.abstractproperty
    def incar(self):
        """Incar object"""
        pass

    @abc.abstractproperty
    def kpoints(self):
        """Kpoints object"""
        pass

    @abc.abstractproperty
    def poscar(self):
        """Poscar object"""
        pass

    @property
    def potcar_symbols(self):
        """
        List of POTCAR symbols, one per element in the POSCAR.
        """
        elements = self.poscar.site_symbols
        potcar_symbols = []
        settings = self.config_dict["POTCAR"]
        if isinstance(settings[elements[-1]], dict):
            # New-style POTCAR config: {element: {"symbol": ...}}.
            for el in elements:
                potcar_symbols.append(settings[el]['symbol']
                                      if el in settings else el)
        else:
            # Old-style POTCAR config: {element: "symbol"}; fall back to the
            # bare element name when no mapping exists.
            for el in elements:
                potcar_symbols.append(settings.get(el, el))
        return potcar_symbols

    @property
    def potcar(self):
        """
        Potcar object.
        """
        return Potcar(self.potcar_symbols, functional=self.potcar_functional)

    @property
    def all_input(self):
        """
        Returns all input files as a dict of {filename: vasp object}
        Returns:
            dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
        """
        kpoints = self.kpoints
        incar = self.incar
        # ISMEAR = -5 (tetrahedron) cannot be used with very few k-points;
        # switch to ISMEAR = 0 in that case. np.prod replaces np.product,
        # which is deprecated and removed in newer numpy releases (identical
        # result).
        if np.prod(kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
            incar["ISMEAR"] = 0
        return {'INCAR': incar,
                'KPOINTS': kpoints,
                'POSCAR': self.poscar,
                'POTCAR': self.potcar}

    def write_input(self, output_dir,
                    make_dir_if_not_present=True, include_cif=False):
        """
        Writes a set of VASP input to a directory.
        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
        """
        if make_dir_if_not_present and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        for k, v in self.all_input.items():
            v.write_file(os.path.join(output_dir, k))
        if include_cif:
            s = self.all_input["POSCAR"].structure
            # Strip whitespace from the formula to build the CIF filename.
            fname = os.path.join(output_dir, "%s.cif" % re.sub(r'\s', "",
                                                               s.formula))
            s.to(filename=fname)

    def as_dict(self, verbosity=2):
        """
        MSON-serializable dict representation.
        Args:
            verbosity (int): 1 drops the (potentially large) structure from
                the dict; 2 (the default) keeps everything.
        """
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
class DictSet(VaspInputSet):
    """
    Concrete implementation of VaspInputSet that is initialized from a dict
    settings. This allows arbitrary settings to be input. In general,
    this is rarely used directly unless there is a source of settings in yaml
    format (e.g., from a REST interface). It is typically used by other
    VaspInputSets for initialization.
    Special consideration should be paid to the way the MAGMOM initialization
    for the INCAR is done. The initialization differs depending on the type of
    structure and the configuration settings. The order in which the magmom is
    determined is as follows:
    1. If the site itself has a magmom setting, that is used.
    2. If the species on the site has a spin setting, that is used.
    3. If the species itself has a particular setting in the config file, that
       is used, e.g., Mn3+ may have a different magmom than Mn4+.
    4. Lastly, the element symbol itself is checked in the config file. If
       there are no settings, VASP's default of 0.6 is used.
    Args:
        structure (Structure): The Structure to create inputs for.
        config_dict (dict): The config dictionary to use.
        files_to_transfer (dict): A dictionary of {filename: filepath}. This
            allows the transfer of files from a previous calculation.
        user_incar_settings (dict): User INCAR settings. This allows a user
            to override INCAR settings, e.g., setting a different MAGMOM for
            various elements or species. Note that in the new scheme,
            ediff_per_atom and hubbard_u are no longer args. Instead, the
            config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
            scales with # of atoms, the latter does not. If both are
            present, EDIFF is preferred. To force such settings, just supply
            user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
            The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
            pymatgen defines different values depending on what anions are
            present in the structure, so these keys can be defined in one
            of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
            for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
            5 regardless of the input structure.
        user_kpoints_settings (dict): Allow user to override kpoints setting by
            supplying a dict. E.g., {"reciprocal_density": 1000}. Default is
            None.
        constrain_total_magmom (bool): Whether to constrain the total magmom
            (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
            species. Defaults to False.
        sort_structure (bool): Whether to sort the structure (using the
            default sort order of electronegativity) before generating input
            files. Defaults to True, the behavior you would want most of the
            time. This ensures that similar atomic species are grouped
            together.
        potcar_functional (str): Functional to use. Default (None) is to use
            the functional in Potcar.DEFAULT_FUNCTIONAL. Valid values:
            "PBE", "LDA", "PW91", "LDA_US"
        force_gamma (bool): Force gamma centered kpoint generation. Default
            (False) is to use the Automatic Density kpoint scheme, which
            will use the Gamma centered generation scheme for hexagonal
            cells, and Monkhorst-Pack otherwise.
        reduce_structure (None/str): Before generating the input files,
            generate the reduced structure. Default (None), does not
            alter the structure. Valid values: None, "niggli", "LLL".
    """
    def __init__(self, structure, config_dict,
                 files_to_transfer=None, user_incar_settings=None,
                 user_kpoints_settings=None,
                 constrain_total_magmom=False, sort_structure=True,
                 potcar_functional="PBE", force_gamma=False,
                 reduce_structure=None):
        if reduce_structure:
            structure = structure.get_reduced_structure(reduce_structure)
        if sort_structure:
            structure = structure.get_sorted_structure()
        self.structure = structure
        # deepcopy so later mutation of config_dict by a caller cannot leak
        # into this instance (and vice versa).
        self.config_dict = deepcopy(config_dict)
        self.files_to_transfer = files_to_transfer or {}
        # All constructor args are stored on the instance so as_dict /
        # from_dict round-trip correctly (see module docstring rules).
        self.constrain_total_magmom = constrain_total_magmom
        self.sort_structure = sort_structure
        self.potcar_functional = potcar_functional
        self.force_gamma = force_gamma
        self.reduce_structure = reduce_structure
        self.user_incar_settings = user_incar_settings or {}
        self.user_kpoints_settings = user_kpoints_settings
    @property
    def incar(self):
        """Incar built from config_dict["INCAR"] overlaid with user settings.

        MAGMOM follows the precedence documented in the class docstring;
        LDAU-related tags are emitted per POSCAR site symbol and stripped
        entirely when LDAU is off.
        """
        settings = dict(self.config_dict["INCAR"])
        # user_incar_settings are absolute and override the preset.
        settings.update(self.user_incar_settings)
        structure = self.structure
        incar = Incar()
        comp = structure.composition
        elements = sorted([el for el in comp.elements if comp[el] > 0],
                          key=lambda e: e.X)
        # Most electronegative element, used to select anion-specific LDAU.
        most_electroneg = elements[-1].symbol
        poscar = Poscar(structure)
        hubbard_u = settings.get("LDAU", False)
        for k, v in settings.items():
            if k == "MAGMOM":
                mag = []
                for site in structure:
                    # Precedence: site magmom > species spin > species entry
                    # in config > element entry in config > 0.6 default.
                    if hasattr(site, 'magmom'):
                        mag.append(site.magmom)
                    elif hasattr(site.specie, 'spin'):
                        mag.append(site.specie.spin)
                    elif str(site.specie) in v:
                        mag.append(v.get(str(site.specie)))
                    else:
                        mag.append(v.get(site.specie.symbol, 0.6))
                incar[k] = mag
            elif k in ('LDAUU', 'LDAUJ', 'LDAUL'):
                if hubbard_u:
                    # Site-level values (e.g. site.ldauu) win if present.
                    if hasattr(structure[0], k.lower()):
                        m = dict([(site.specie.symbol, getattr(site, k.lower()))
                                  for site in structure])
                        incar[k] = [m[sym] for sym in poscar.site_symbols]
                    # lookup specific LDAU if specified for most_electroneg atom
                    elif most_electroneg in v.keys():
                        if isinstance(v[most_electroneg], dict):
                            incar[k] = [v[most_electroneg].get(sym, 0)
                                        for sym in poscar.site_symbols]
                    # else, use fallback LDAU value if it exists
                    else:
                        incar[k] = [v.get(sym, 0) for sym in poscar.site_symbols]
            elif k.startswith("EDIFF") and k != "EDIFFG":
                # EDIFF wins over EDIFF_PER_ATOM when both are supplied.
                if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
                    incar["EDIFF"] = float(v) * structure.num_sites
                else:
                    incar["EDIFF"] = float(settings["EDIFF"])
            else:
                incar[k] = v
        has_u = hubbard_u and sum(incar['LDAUU']) > 0
        if has_u:
            # modify LMAXMIX if LSDA+U and you have d or f electrons
            # note that if the user explicitly sets LMAXMIX in settings it will
            # override this logic.
            if 'LMAXMIX' not in settings.keys():
                # contains f-electrons
                if any([el.Z > 56 for el in structure.composition]):
                    incar['LMAXMIX'] = 6
                # contains d-electrons
                elif any([el.Z > 20 for el in structure.composition]):
                    incar['LMAXMIX'] = 4
        else:
            # LDAU is off (or all U values are zero): drop every LDAU* tag.
            for key in list(incar.keys()):
                if key.startswith('LDAU'):
                    del incar[key]
        if self.constrain_total_magmom:
            # NUPDOWN = sum of significant (|m| > 0.6) site magmoms.
            nupdown = sum([mag if abs(mag) > 0.6 else 0
                           for mag in incar['MAGMOM']])
            incar['NUPDOWN'] = nupdown
        return incar
    @property
    def poscar(self):
        """Poscar for the (possibly reduced/sorted) structure."""
        return Poscar(self.structure)
    @property
    def nelect(self):
        """
        Gets the default number of electrons for a given structure.
        """
        return int(round(
            sum([self.structure.composition.element_composition[ps.element]
                 * ps.ZVAL
                 for ps in self.potcar])))
    @property
    def kpoints(self):
        """
        Writes out a KPOINTS file using the fully automated grid method. Uses
        Gamma centered meshes for hexagonal cells and Monk grids otherwise.
        Algorithm:
            Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportional to its length.
        """
        settings = self.user_kpoints_settings or self.config_dict["KPOINTS"]
        # If grid_density is in the kpoints_settings use
        # Kpoints.automatic_density
        if settings.get('grid_density'):
            return Kpoints.automatic_density(
                self.structure, int(settings['grid_density']),
                self.force_gamma)
        # If reciprocal_density is in the kpoints_settings use
        # Kpoints.automatic_density_by_vol
        elif settings.get('reciprocal_density'):
            return Kpoints.automatic_density_by_vol(
                self.structure, int(settings['reciprocal_density']),
                self.force_gamma)
        # If length is in the kpoints_settings use Kpoints.automatic
        elif settings.get('length'):
            return Kpoints.automatic(settings['length'])
        # Raise error. Unsure of which kpoint generation to use
        else:
            raise ValueError(
                "Invalid KPoint Generation algo : Supported Keys are "
                "grid_density: for Kpoints.automatic_density generation, "
                "reciprocal_density: for KPoints.automatic_density_by_vol "
                "generation, and length : for Kpoints.automatic generation")
    def __str__(self):
        return self.__class__.__name__
    def __repr__(self):
        return self.__class__.__name__
    def write_input(self, output_dir,
                    make_dir_if_not_present=True, include_cif=False):
        """Write the VASP inputs, then copy over any files_to_transfer."""
        super(DictSet, self).write_input(
            output_dir=output_dir,
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif)
        for k, v in self.files_to_transfer.items():
            shutil.copy(v, os.path.join(output_dir, k))
class MITRelaxSet(DictSet):
    """
    Standard implementation of VaspInputSet utilizing parameters in the MIT
    High-throughput project.
    The parameters are chosen specifically for a high-throughput project,
    which means in general pseudopotentials with fewer electrons were chosen.
    Please refer::
        A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
        K. A. Persson, G. Ceder. A high-throughput infrastructure for density
        functional theory calculations. Computational Materials Science,
        2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
    """
    # Default settings, loaded once at import time from the bundled yaml file.
    CONFIG = loadfn(os.path.join(MODULE_DIR, "MITRelaxSet.yaml"))
    def __init__(self, structure, **kwargs):
        super(MITRelaxSet, self).__init__(
            structure, MITRelaxSet.CONFIG, **kwargs)
        # Saved so as_dict/from_dict round-trip (see module docstring rules).
        self.kwargs = kwargs
class MPRelaxSet(DictSet):
    """
    Implementation of VaspInputSet utilizing parameters in the public
    Materials Project. Typically, the pseudopotentials chosen contain more
    electrons than the MIT parameters, and the k-point grid is ~50% more dense.
    The LDAUU parameters are also different due to the different psps used,
    which result in different fitted values.
    """
    # Default settings, loaded once at import time from the bundled yaml file.
    CONFIG = loadfn(os.path.join(MODULE_DIR, "MPRelaxSet.yaml"))
    def __init__(self, structure, **kwargs):
        super(MPRelaxSet, self).__init__(
            structure, MPRelaxSet.CONFIG, **kwargs)
        # Saved so as_dict/from_dict round-trip (see module docstring rules).
        self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
    """
    Same as the MPRelaxSet, but with HSE parameters.
    """
    # Default settings, loaded once at import time from the bundled yaml file.
    CONFIG = loadfn(os.path.join(MODULE_DIR, "MPHSERelaxSet.yaml"))
    def __init__(self, structure, **kwargs):
        super(MPHSERelaxSet, self).__init__(
            structure, MPHSERelaxSet.CONFIG, **kwargs)
        # Saved so as_dict/from_dict round-trip (see module docstring rules).
        self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
    # Input set for a single-point (static) calculation, usually chained
    # after a relaxation via from_prev_calc().
    def __init__(self, structure, prev_incar=None, prev_kpoints=None,
                 lepsilon=False, lcalcpol=False, reciprocal_density=100,
                 **kwargs):
        """
        Run a static calculation.
        Args:
            structure (Structure): Structure from previous run.
            prev_incar (Incar/str): Incar file from previous run, or a path
                to one.
            prev_kpoints (Kpoints/str): Kpoints from previous run, or a path
                to one.
            lepsilon (bool): Whether to add static dielectric calculation
                (DFPT; sets IBRION=8 and LEPSILON in the INCAR).
            lcalcpol (bool): Whether to turn on Berry-phase polarization
                evaluation (LCALCPOL).
            reciprocal_density (int): For static calculations,
                we usually set the reciprocal density by volume. This is a
                convenience arg to change that, rather than using
                user_kpoints_settings. Defaults to 100, which is ~50% more than
                that of standard relaxation calculations.
            \\*\\*kwargs: kwargs supported by MPRelaxSet.
        """
        super(MPStaticSet, self).__init__(structure, **kwargs)
        # Accept file paths as well as pre-parsed objects for both
        # previous-run inputs.
        if isinstance(prev_incar, six.string_types):
            prev_incar = Incar.from_file(prev_incar)
        if isinstance(prev_kpoints, six.string_types):
            prev_kpoints = Kpoints.from_file(prev_kpoints)
        self.prev_incar = prev_incar
        self.prev_kpoints = prev_kpoints
        self.reciprocal_density = reciprocal_density
        self.structure = structure
        self.kwargs = kwargs
        self.lepsilon = lepsilon
        self.lcalcpol = lcalcpol

    @property
    def incar(self):
        # Start from the previous run's INCAR when available, otherwise from
        # the parent (MPRelaxSet) defaults; parent_incar is kept around as
        # the source of truth for selected overrides below.
        parent_incar = super(MPStaticSet, self).incar
        incar = Incar(self.prev_incar) if self.prev_incar is not None else \
            Incar(parent_incar)
        # Static-run overrides: no ionic steps, tetrahedron smearing,
        # write charge/aux data, start from scratch charge density.
        incar.update(
            {"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True,
             "LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0,
             "ICHARG": 0, "ALGO": "Normal"})
        if self.lepsilon:
            # DFPT dielectric response.
            incar["IBRION"] = 8
            incar["LEPSILON"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output ionic.
            incar.pop("NSW", None)
            incar.pop("NPAR", None)
        if self.lcalcpol:
            incar["LCALCPOL"] = True
        for k in ["MAGMOM", "NUPDOWN"] + list(self.kwargs.get(
                "user_incar_settings", {}).keys()):
            # For these parameters as well as user specified settings, override
            # the incar settings.
            if parent_incar.get(k, None) is not None:
                incar[k] = parent_incar[k]
            else:
                incar.pop(k, None)
        # use new LDAUU when possible b/c the Poscar might have changed
        # representation
        if incar.get('LDAU'):
            u = incar.get('LDAUU', [])
            j = incar.get('LDAUJ', [])
            # If any effective U (U - J) is positive, re-take the LDAU tags
            # from the parent set rather than the previous run.
            if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:
                for tag in ('LDAUU', 'LDAUL', 'LDAUJ'):
                    incar.update({tag: parent_incar[tag]})
            # ensure to have LMAXMIX for GGA+U static run
            if "LMAXMIX" not in incar:
                incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
        # Compare ediff between previous and staticinputset values,
        # choose the tighter ediff
        incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
        return incar

    @property
    def kpoints(self):
        # Static runs use a denser reciprocal-space mesh; push the requested
        # density into the config before delegating to the parent generator.
        self.config_dict["KPOINTS"]["reciprocal_density"] = \
            self.reciprocal_density
        kpoints = super(MPStaticSet, self).kpoints
        # Prefer to use k-point scheme from previous run
        if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
            if self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst:
                # Round odd subdivisions up to even for Monkhorst-Pack.
                k_div = [kp + 1 if kp % 2 == 1 else kp
                         for kp in kpoints.kpts[0]]
                kpoints = Kpoints.monkhorst_automatic(k_div)
            else:
                kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
        return kpoints

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       small_gap_multiply=None, **kwargs):
        """
        Generate a set of Vasp input files for static calculations from a
        directory of previous Vasp run.
        Args:
            prev_calc_dir (str): Directory containing the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPStaticSet,
                other than prev_incar and prev_structure and prev_kpoints which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        prev_incar = vasprun.incar
        prev_kpoints = vasprun.kpoints
        # We will make a standard structure for the given symprec.
        # Note: "standardize and sym_prec" passes 0/False (i.e. no
        # standardization) when standardize is falsy.
        prev_structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # multiply the reciprocal density if needed:
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
        return MPStaticSet(
            structure=prev_structure, prev_incar=prev_incar,
            prev_kpoints=prev_kpoints,
            reciprocal_density=reciprocal_density, **kwargs)
class MPHSEBSSet(MPHSERelaxSet):

    def __init__(self, structure, user_incar_settings=None, added_kpoints=None,
                 mode="Uniform", reciprocal_density=None,
                 kpoints_line_density=20, **kwargs):
        """
        Implementation of a VaspInputSet for HSE band structure computations.

        Remember that HSE band structures must be self-consistent in VASP. A
        band structure along symmetry lines for instance needs BOTH a uniform
        grid with appropriate weights AND a path along the lines with weight 0.

        Thus, the "Uniform" mode is just like regular static SCF but allows
        adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
        uniform grid that have zero weight (e.g., for better gap estimate).
        The "Line" mode is just like Uniform mode, but additionally adds
        k-points along symmetry lines with zero weight.

        Args:
            structure (Structure): Structure to compute
            user_incar_settings (dict): A dict specifying additional incar
                settings
            added_kpoints (list): a list of kpoints (list of 3 number list)
                added to the run. The k-points are in fractional coordinates
            mode (str): "Line" - generate k-points along symmetry lines for
                bandstructure. "Uniform" - generate uniform k-points grid
            reciprocal_density (int): k-point density to use for uniform mesh.
                If None, falls back to the density configured in the
                input-set's KPOINTS settings.
            kpoints_line_density (int): k-point density for high symmetry lines
            **kwargs (dict): Any other parameters to pass into DictVaspInputSet
        """
        super(MPHSEBSSet, self).__init__(structure, **kwargs)
        self.structure = structure
        self.user_incar_settings = user_incar_settings or {}
        # Static HSE defaults: no ionic steps, gaussian smearing; ISYM=3 is
        # the symmetry mode used for hybrid-functional runs.
        self.config_dict["INCAR"].update(
            {"NSW": 0, "ISMEAR": 0, "SIGMA": 0.05, "ISYM": 3,
             "LCHARG": False, "NELMIN": 5})
        self.added_kpoints = added_kpoints if added_kpoints is not None else []
        self.mode = mode
        # BUG FIX: this previously read self.kpoints_settings, an attribute of
        # the legacy DictVaspInputSet API that does not exist on DictSet in
        # this module (settings live in config_dict — cf. MPStaticSet.kpoints),
        # so the default path raised AttributeError whenever
        # reciprocal_density was not supplied.
        self.reciprocal_density = reciprocal_density or \
            self.config_dict["KPOINTS"]["reciprocal_density"]
        self.kpoints_line_density = kpoints_line_density

    @property
    def kpoints(self):
        """Build the explicit (Reciprocal-style) k-point list: uniform IR mesh
        with true weights, plus user k-points and (in Line mode) symmetry-line
        k-points, both with zero weight."""
        kpts = []
        weights = []
        all_labels = []
        # for both modes, include the Uniform mesh w/standard weights
        grid = Kpoints.automatic_density_by_vol(self.structure,
                                                self.reciprocal_density).kpts
        ir_kpts = SpacegroupAnalyzer(self.structure, symprec=0.1)\
            .get_ir_reciprocal_mesh(grid[0])
        for k in ir_kpts:
            kpts.append(k[0])
            weights.append(int(k[1]))
            all_labels.append(None)
        # for both modes, include any user-added kpoints w/zero weight
        for k in self.added_kpoints:
            kpts.append(k)
            weights.append(0.0)
            all_labels.append("user-defined")
        # for line mode only, add the symmetry lines w/zero weight
        if self.mode.lower() == "line":
            kpath = HighSymmKpath(self.structure)
            frac_k_points, labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density,
                coords_are_cartesian=False)
            for k in range(len(frac_k_points)):
                kpts.append(frac_k_points[k])
                weights.append(0.0)
                all_labels.append(labels[k])
        comment = "HSE run along symmetry lines" if self.mode.lower() == "line" \
            else "HSE run on uniform grid"
        return Kpoints(comment=comment,
                       style=Kpoints.supported_modes.Reciprocal,
                       num_kpts=len(kpts), kpts=kpts, kpts_weights=weights,
                       labels=all_labels)

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, mode="gap",
                       reciprocal_density=50, copy_chgcar=True, **kwargs):
        """
        Generate a set of Vasp input files for HSE calculations from a
        directory of previous Vasp run. if mode=="gap", it explicitly adds VBM and CBM
        of the prev. run to the k-point list of this run.

        Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of previous vasp run.
            mode (str): Either "uniform", "gap" or "line"
            reciprocal_density (int): density of k-mesh
            copy_chgcar (bool): whether to copy CHGCAR of previous run
            \\*\\*kwargs: All kwargs supported by MPHSEBSStaticSet,
                other than prev_structure which is determined from the previous
                calc dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        # note: don't standardize the cell because we want to retain k-points
        prev_structure = get_structure_from_prev_run(vasprun, outcar,
                                                     sym_prec=0)
        added_kpoints = []
        if mode.lower() == "gap":
            # Pin the band-edge k-points (zero weight) for a better gap.
            bs = vasprun.get_band_structure()
            vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
            if vbm:
                added_kpoints.append(vbm.frac_coords)
            if cbm:
                added_kpoints.append(cbm.frac_coords)
        files_to_transfer = {}
        if copy_chgcar:
            # Pick the lexicographically-last CHGCAR* (e.g. a gzipped one).
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        return MPHSEBSSet(
            structure=prev_structure,
            added_kpoints=added_kpoints, reciprocal_density=reciprocal_density,
            mode=mode, files_to_transfer=files_to_transfer, **kwargs)
class MPNonSCFSet(MPRelaxSet):
    # Non-self-consistent (ICHARG=11) input set for band structures (Line
    # mode) or dense-DOS runs (Uniform mode); normally initialized from a
    # previous static run via from_prev_calc().
    def __init__(self, structure, prev_incar=None,
                 mode="line", nedos=601, reciprocal_density=100, sym_prec=0.1,
                 kpoints_line_density=20, optics=False, **kwargs):
        """
        Init a MPNonSCFSet. Typically, you would use the classmethod
        from_prev_calc to initialize from a previous SCF run.
        Args:
            structure (Structure): Structure to compute
            prev_incar (Incar/string): Incar file from previous run.
            mode (str): Line or Uniform mode supported.
            nedos (int): nedos parameter. Default to 601.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            sym_prec (float): Symmetry precision (for Uniform mode).
            kpoints_line_density (int): Line density for Line mode.
            optics (bool): whether to add dielectric function
            \\*\\*kwargs: kwargs supported by MPVaspInputSet.
        """
        super(MPNonSCFSet, self).__init__(structure, **kwargs)
        # prev_incar may be given as a file path; parse it if so.
        if isinstance(prev_incar, six.string_types):
            prev_incar = Incar.from_file(prev_incar)
        self.prev_incar = prev_incar
        self.kwargs = kwargs
        self.nedos = nedos
        self.reciprocal_density = reciprocal_density
        self.sym_prec = sym_prec
        self.kpoints_line_density = kpoints_line_density
        self.optics = optics
        self.mode = mode.lower()
        if self.mode.lower() not in ["line", "uniform"]:
            raise ValueError("Supported modes for NonSCF runs are 'Line' and "
                             "'Uniform'!")
        # Warn unless the user asked for Uniform mode with a dense DOS grid.
        if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
            warnings.warn("It is recommended to use Uniform mode with a high "
                          "NEDOS for optics calculations.")

    @property
    def incar(self):
        incar = super(MPNonSCFSet, self).incar
        # Carry over everything from the previous run first...
        if self.prev_incar is not None:
            incar.update({k: v for k, v in self.prev_incar.items()})
        # Overwrite necessary INCAR parameters from previous runs
        incar.update({"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001,
                      "LCHARG": False, "LORBIT": 11, "LWAVE": False,
                      "NSW": 0, "ISYM": 0, "ICHARG": 11})
        # ...then let explicit user settings win over both.
        incar.update(self.kwargs.get("user_incar_settings", {}))
        if self.mode.lower() == "uniform":
            # Set smaller steps for DOS output
            incar["NEDOS"] = self.nedos
        if self.optics:
            incar["LOPTICS"] = True
        # The previous-run MAGMOM refers to the old POSCAR ordering; drop it.
        incar.pop("MAGMOM", None)
        return incar

    @property
    def kpoints(self):
        if self.mode == "line":
            # Explicit k-points along the high-symmetry path; all weights 1.
            kpath = HighSymmKpath(self.structure)
            frac_k_points, k_points_labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density,
                coords_are_cartesian=False)
            kpoints = Kpoints(
                comment="Non SCF run along symmetry lines",
                style=Kpoints.supported_modes.Reciprocal,
                num_kpts=len(frac_k_points),
                kpts=frac_k_points, labels=k_points_labels,
                kpts_weights=[1] * len(frac_k_points))
        else:
            # Uniform mode: symmetry-reduce the automatic mesh to the
            # irreducible k-points with integer multiplicities as weights.
            kpoints = Kpoints.automatic_density_by_vol(self.structure,
                                                       self.reciprocal_density)
            mesh = kpoints.kpts[0]
            ir_kpts = SpacegroupAnalyzer(
                self.structure,
                symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
            kpts = []
            weights = []
            for k in ir_kpts:
                kpts.append(k[0])
                weights.append(int(k[1]))
            kpoints = Kpoints(comment="Non SCF run on uniform grid",
                              style=Kpoints.supported_modes.Reciprocal,
                              num_kpts=len(ir_kpts),
                              kpts=kpts, kpts_weights=weights)
        return kpoints

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
                       nbands_factor=1.2, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       kpoints_line_density=20, small_gap_multiply=None,
                       **kwargs):
        """
        Generate a set of Vasp input files for NonSCF calculations from a
        directory of previous static Vasp run.
        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume in uniform mode (defaults to 100)
            kpoints_line_density (int): density of k-mesh in line mode
                (defaults to 20)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPNonSCFSet,
                other than structure, prev_incar and prev_chgcar which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        incar = vasprun.incar
        # Get a Magmom-decorated structure
        structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # Turn off spin when magmom for every site is smaller than 0.02.
        if outcar and outcar.magnetization:
            site_magmom = np.array([i['tot'] for i in outcar.magnetization])
            ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
        elif vasprun.is_spin:
            ispin = 2
        else:
            ispin = 1
        # Pad NBANDS (e.g. for optics) relative to the previous run.
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
        incar.update({"ISPIN": ispin, "NBANDS": nbands})
        files_to_transfer = {}
        if copy_chgcar:
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        # multiply the reciprocal density if needed:
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
                kpoints_line_density = kpoints_line_density * \
                    small_gap_multiply[1]
        return MPNonSCFSet(structure=structure, prev_incar=incar,
                           reciprocal_density=reciprocal_density,
                           kpoints_line_density=kpoints_line_density,
                           files_to_transfer=files_to_transfer, **kwargs)
class MPSOCSet(MPStaticSet):
    """Input set for spin-orbit coupling (SOC) calculations. The input
    structure must carry a 'magmom' site property with 3-component moments."""

    def __init__(self, structure, saxis=(0, 0, 1), prev_incar=None,
                 reciprocal_density=100, **kwargs):
        """
        Init a MPSOCSet.
        Args:
            structure (Structure): the structure must have the 'magmom' site
                property and each magnetic moment value must have 3
                components. eg:- magmom = [[0,0,2], ...]
            saxis (tuple): magnetic moment orientation
            prev_incar (Incar): Incar file from previous run.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            \\*\\*kwargs: kwargs supported by MPVaspInputSet.
        """
        # BUG FIX: this test previously used ``and``, which (a) skipped the
        # check entirely whenever the magmom attribute existed (even if it
        # was a scalar), and (b) raised a bare AttributeError from evaluating
        # structure[0].magmom when it didn't. ``or`` gives the intended
        # validation and the friendly error message.
        if not hasattr(structure[0], "magmom") or \
                not isinstance(structure[0].magmom, list):
            raise ValueError("The structure must have the 'magmom' site "
                             "property and each magnetic moment value must have 3 "
                             "components. eg:- magmom = [0,0,2]")
        self.saxis = saxis
        super(MPSOCSet, self).__init__(
            structure, prev_incar=prev_incar,
            reciprocal_density=reciprocal_density, **kwargs)

    @property
    def incar(self):
        incar = super(MPSOCSet, self).incar
        # Carry over everything from the previous run first...
        if self.prev_incar is not None:
            incar.update({k: v for k, v in self.prev_incar.items()})
        # Overwrite necessary INCAR parameters from previous runs: no
        # symmetry, noncollinear spin-orbit run reading the old charge.
        incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11,
                      "SAXIS": list(self.saxis)})
        # ...then let explicit user settings win over both.
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
                       nbands_factor=1.2, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       small_gap_multiply=None, **kwargs):
        """
        Generate a set of Vasp input files for SOC calculations from a
        directory of previous static Vasp run. SOC calc requires all 3
        components for MAGMOM for each atom in the structure.
        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPSOCSet,
                other than structure, prev_incar and prev_chgcar which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        incar = vasprun.incar
        # Get a magmom-decorated structure
        structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # override magmom if provided
        if kwargs.get("magmom", None):
            structure = structure.copy(
                site_properties={"magmom": kwargs["magmom"]})
            kwargs.pop("magmom", None)
        # magmom has to be 3D for SOC calculation.
        if hasattr(structure[0], "magmom"):
            if not isinstance(structure[0].magmom, list):
                # Promote scalar (collinear) moments to z-aligned 3-vectors.
                structure = structure.copy(site_properties={
                    "magmom": [[0, 0, site.magmom] for site in structure]})
        else:
            # TYPO FIX: was "mamgom".
            raise ValueError("Neither the previous structure has magmom "
                             "property nor magmom provided")
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
        incar.update({"NBANDS": nbands})
        files_to_transfer = {}
        if copy_chgcar:
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        # multiply the reciprocal density if needed:
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
        return MPSOCSet(structure, prev_incar=incar,
                        files_to_transfer=files_to_transfer,
                        reciprocal_density=reciprocal_density, **kwargs)
class MVLElasticSet(MPRelaxSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research.

    This input set is used to calculate elastic constants in VASP. It is used
    in the following work::

        Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
        "Elastic Properties of Alkali Superionic Conductor Electrolytes
        from First Principles Calculations", J. Electrochem. Soc.
        2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes

    To read the elastic constants, you may use the Outcar class which parses
    the elastic constants.

    Args:
        potim (float): POTIM parameter. The default of 0.015 is usually fine,
            but some structures may require a smaller step.
        \\*\\*kwargs: Other kwargs supported by :class:`MPRelaxSet`.
    """

    def __init__(self, structure, potim=0.015, **kwargs):
        super(MVLElasticSet, self).__init__(structure, **kwargs)
        # IBRION=6 + NFREE=2: finite-difference ionic displacements with the
        # step size controlled by POTIM.
        elastic_settings = {"IBRION": 6, "NFREE": 2, "POTIM": potim}
        self.config_dict["INCAR"].update(elastic_settings)
        # NPAR is incompatible with this calculation mode; remove it.
        self.config_dict["INCAR"].pop("NPAR", None)
class MVLSlabSet(MPRelaxSet):
    """
    Class for writing a set of slab vasp runs, covering both the slabs
    themselves (oriented along the c direction) and the corresponding
    oriented unit cells (bulk), so that the KPOINTS, POTCAR and INCAR
    criteria stay consistent between the two.

    Args:
        k_product: default to 50, kpoint number * length for a & b
            directions, also for c direction in bulk calculations
        bulk (bool): Set to True for bulk calculation. Defaults to False.
        **kwargs:
            Other kwargs supported by :class:`DictSet`.
    """

    def __init__(self, structure, k_product=50, bulk=False, **kwargs):
        super(MVLSlabSet, self).__init__(structure, **kwargs)
        self.structure = structure
        self.k_product = k_product
        self.bulk = bulk
        self.kwargs = kwargs

        incar_overrides = {"EDIFF": 1e-6, "EDIFFG": -0.01, "ENCUT": 400,
                           "ISMEAR": 0, "SIGMA": 0.05, "ISIF": 3}
        if not self.bulk:
            # Slab runs relax ions only (ISIF=2) and use mixing parameters
            # suited to cells containing vacuum.
            incar_overrides["ISIF"] = 2
            incar_overrides["AMIN"] = 0.01
            incar_overrides["AMIX"] = 0.2
            incar_overrides["BMIX"] = 0.001
            incar_overrides["NELMIN"] = 8
        self.config_dict["INCAR"].update(incar_overrides)

    @property
    def kpoints(self):
        """
        k_product, default to 50, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        # To get input sets, the input structure has to has the same number
        # of required parameters as a Structure object (ie. 4). Slab
        # attributes aren't going to affect the VASP inputs anyways so
        # converting the slab into a structure should not matter
        kpt = super(MVLSlabSet, self).kpoints
        kpt.comment = "Automatic mesh"
        kpt.style = 'Gamma'

        # use k_product to calculate kpoints, k_product = kpts[0][0] * a
        a_len, b_len, c_len = self.structure.lattice.abc
        mesh = [int(self.k_product / a_len + 0.5),
                int(self.k_product / b_len + 0.5), 1]
        self.kpt_calc = mesh
        # calculate kpts (c direction) for bulk. (for slab, set to 1)
        if self.bulk:
            mesh[2] = int(self.k_product / c_len + 0.5)
        kpt.kpts[0] = mesh
        return kpt
class MITNEBSet(MITRelaxSet):
    """
    Class for writing NEB inputs. Note that EDIFF is not on a per atom
    basis for this input set.
    Args:
        unset_encut (bool): Whether to unset ENCUT.
        \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
    """

    def __init__(self, structures, unset_encut=False, **kwargs):
        if len(structures) < 3:
            raise ValueError("You need at least 3 structures for an NEB.")
        # Site order must match across images; never re-sort.
        kwargs["sort_structure"] = False
        super(MITNEBSet, self).__init__(structures[0], **kwargs)
        self.structures = self._process_structures(structures)
        # BUG FIX: this was hard-coded to False, so the attribute never
        # reflected the constructor argument (even though ENCUT was still
        # popped below when requested).
        self.unset_encut = unset_encut
        if unset_encut:
            self.config_dict["INCAR"].pop("ENCUT", None)
        if "EDIFF" not in self.config_dict["INCAR"]:
            # Convert the per-atom tolerance into an absolute one.
            self.config_dict["INCAR"]["EDIFF"] = self.config_dict[
                "INCAR"].pop("EDIFF_PER_ATOM")
        # NEB specific defaults
        defaults = {'IMAGES': len(structures) - 2, 'IBRION': 1, 'ISYM': 0,
                    'LCHARG': False, "LDAU": False}
        self.config_dict["INCAR"].update(defaults)

    @property
    def poscar(self):
        """POSCAR of the first endpoint only."""
        return Poscar(self.structures[0])

    @property
    def poscars(self):
        """One Poscar per image, endpoints included."""
        return [Poscar(s) for s in self.structures]

    def _process_structures(self, structures):
        """
        Remove any atom jumps across the cell
        """
        # Translate each site of each image so it stays within half a
        # lattice vector of the corresponding site in the previous image.
        # NOTE(review): translate_sites mutates the input structures in
        # place — confirm callers do not rely on them being untouched.
        input_structures = structures
        structures = [input_structures[0]]
        for s in input_structures[1:]:
            prev = structures[-1]
            for i in range(len(s)):
                t = np.round(prev[i].frac_coords - s[i].frac_coords)
                if np.any(np.abs(t) > 0.5):
                    s.translate_sites([i], t, to_unit_cell=False)
            structures.append(s)
        return structures

    def write_input(self, output_dir, make_dir_if_not_present=True,
                    write_cif=False, write_path_cif=False,
                    write_endpoint_inputs=False):
        """
        NEB inputs has a special directory structure where inputs are in 00,
        01, 02, ....
        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a cif for each image.
            write_endpoint_inputs (bool): If true, writes input files for
                running endpoint calculations.
        """
        if make_dir_if_not_present and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Shared inputs live at the top level; each image gets its own POSCAR
        # in a zero-padded subdirectory (00, 01, ...).
        self.incar.write_file(os.path.join(output_dir, 'INCAR'))
        self.kpoints.write_file(os.path.join(output_dir, 'KPOINTS'))
        self.potcar.write_file(os.path.join(output_dir, 'POTCAR'))
        for i, p in enumerate(self.poscars):
            d = os.path.join(output_dir, str(i).zfill(2))
            if not os.path.exists(d):
                os.makedirs(d)
            p.write_file(os.path.join(d, 'POSCAR'))
            if write_cif:
                p.structure.to(filename=os.path.join(d, '{}.cif'.format(i)))
        if write_endpoint_inputs:
            # Endpoint relaxations use plain MITRelaxSet inputs in the first
            # and last image directories.
            end_point_param = MITRelaxSet(
                self.structures[0],
                user_incar_settings=self.user_incar_settings)
            for image in ['00', str(len(self.structures) - 1).zfill(2)]:
                end_point_param.incar.write_file(os.path.join(output_dir, image, 'INCAR'))
                end_point_param.kpoints.write_file(os.path.join(output_dir, image, 'KPOINTS'))
                end_point_param.potcar.write_file(os.path.join(output_dir, image, 'POTCAR'))
        if write_path_cif:
            # Overlay all images into one structure to visualize the path.
            sites = set()
            lattice = self.structures[0].lattice
            for site in chain(*(s.sites for s in self.structures)):
                sites.add(PeriodicSite(site.species_and_occu, site.frac_coords,
                                       lattice))
            nebpath = Structure.from_sites(sorted(sites))
            nebpath.to(filename=os.path.join(output_dir, 'path.cif'))
class MITMDSet(MITRelaxSet):
    """
    Class for writing a vasp md run. This DOES NOT do multiple stage
    runs.
    Args:
        start_temp (int): Starting temperature.
        end_temp (int): Final temperature.
        nsteps (int): Number of time steps for simulations. The NSW parameter.
        time_step (int): The time step for the simulation. The POTIM
            parameter. Defaults to 2fs.
        spin_polarized (bool): Whether to do spin polarized calculations.
            The ISPIN parameter. Defaults to False.
        sort_structure (bool): Whether to sort structure. Defaults to False
            (different behavior from standard input sets).
        \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
    """

    def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
                 spin_polarized=False, **kwargs):
        # MD default settings
        md_settings = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
                       'EDIFF_PER_ATOM': 0.000001, 'LSCALU': False,
                       'LCHARG': False,
                       'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
                       'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
                       'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
                       'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
                       'SMASS': 0, 'POTIM': time_step, 'PREC': 'Normal',
                       'ISPIN': 2 if spin_polarized else 1,
                       "LDAU": False}
        super(MITMDSet, self).__init__(structure, **kwargs)

        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.time_step = time_step
        self.spin_polarized = spin_polarized
        self.kwargs = kwargs

        # use VASP default ENCUT
        self.config_dict["INCAR"].pop('ENCUT', None)
        # Non-spin-polarized MD has no use for MAGMOM.
        if not spin_polarized:
            self.config_dict["INCAR"].pop('MAGMOM', None)
        self.config_dict["INCAR"].update(md_settings)

    @property
    def kpoints(self):
        # MD runs are Gamma-point only.
        return Kpoints.gamma_automatic()
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """Locate and parse the vasprun.xml and OUTCAR files in ``path``.

    Prefers the plain ``vasprun.xml``/``OUTCAR`` names; otherwise falls back
    to the lexicographically-last match (e.g. a relaxation-numbered or
    gzipped variant).

    Args:
        path (str): Directory of a previous VASP run.
        parse_dos (bool): Whether to parse the DOS from vasprun.xml.
        parse_eigen (bool): Whether to parse eigenvalues from vasprun.xml.

    Returns:
        (Vasprun, Outcar) tuple.

    Raises:
        ValueError: If no vasprun.xml* or OUTCAR* file is found.
    """
    vrun_paths = glob.glob(os.path.join(path, "vasprun.xml*"))
    outcar_paths = glob.glob(os.path.join(path, "OUTCAR*"))
    if not vrun_paths or not outcar_paths:
        raise ValueError(
            "Unable to get vasprun.xml/OUTCAR from prev calculation in %s" %
            path)
    preferred_vrun = os.path.join(path, "vasprun.xml")
    preferred_outcar = os.path.join(path, "OUTCAR")
    vsfile = preferred_vrun if preferred_vrun in vrun_paths \
        else sorted(vrun_paths)[-1]
    outcarfile = preferred_outcar if preferred_outcar in outcar_paths \
        else sorted(outcar_paths)[-1]
    return (Vasprun(str(vsfile), parse_dos=parse_dos, parse_eigen=parse_eigen),
            Outcar(str(outcarfile)))
def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1,
                                international_monoclinic=True):
    """
    Process structure from previous run.
    Args:
        vasprun (Vasprun): Vasprun that contains the final structure
            from previous run.
        outcar (Outcar): Outcar that contains the magnetization info from
            previous run.
        sym_prec (float): Tolerance for symmetry finding for standardization. If
            no standardization is desired, set to 0 or a False.
        international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults True.
    Returns:
        Returns the magmom-decorated structure that can be passed to get
        Vasp input files, e.g. get_kpoints.
    """
    structure = vasprun.final_structure

    site_properties = {}
    # magmom
    if vasprun.is_spin:
        if outcar and outcar.magnetization:
            # Prefer per-site totals from OUTCAR when available.
            site_properties.update({"magmom": [i['tot']
                                               for i in outcar.magnetization]})
        else:
            site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
    # ldau
    if vasprun.parameters.get("LDAU", False):
        # Expand the per-species LDAU values from the INCAR into per-site
        # lists, consuming one value per species in first-appearance order.
        for k in ("LDAUU", "LDAUJ", "LDAUL"):
            vals = vasprun.incar[k]
            m = {}      # species symbol -> value
            l = []      # per-site values in structure order
            s = 0       # index of the next unconsumed INCAR value
            for site in structure:
                if site.specie.symbol not in m:
                    m[site.specie.symbol] = vals[s]
                    s += 1
                l.append(m[site.specie.symbol])
            if len(l) == len(structure):
                site_properties.update({k.lower(): l})
            else:
                raise ValueError("length of list {} not the same as"
                                 "structure".format(l))

    structure = structure.copy(site_properties=site_properties)

    if sym_prec:
        sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
        new_structure = sym_finder.get_primitive_standard_structure(
            international_monoclinic=international_monoclinic)
        # the primitive structure finding has had several bugs in the past
        # defend through validation
        vpa_old = structure.volume / structure.num_sites
        vpa_new = new_structure.volume / new_structure.num_sites
        if abs(vpa_old - vpa_new) / vpa_old > 0.02:
            raise ValueError(
                "Standardizing cell failed! VPA old: {}, VPA new: {}".format(
                    vpa_old, vpa_new))
        sm = StructureMatcher()
        if not sm.fit(structure, new_structure):
            raise ValueError(
                "Standardizing cell failed! Old structure doesn't match new.")
        structure = new_structure

    return structure
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
                      make_dir_if_not_present=True, subfolder=None,
                      sanitize=False, include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.
    Args:
        structures ([Structure]): Sequence of Structures.
        vasp_input_set (VaspInputSet): VaspInputSet class that creates
            vasp input files from structures. Note that a class should be
            supplied. Defaults to MPRelaxSet.
        output_dir (str): Directory to output files. Defaults to current
            directory ".".
        make_dir_if_not_present (bool): Create the directory if not present.
            Defaults to True.
        subfolder (callable): Function to create subdirectory name from
            structure. Defaults to simply "formula_count".
        sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized output
            are generally easier for viewing and certain forms of analysis.
            Defaults to False.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
        \\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class in
            addition to structure.
    """
    for idx, structure in enumerate(structures):
        # Formula with whitespace stripped, e.g. "Fe2 O3" -> "Fe2O3".
        formula = re.sub(r'\s+', "", structure.formula)
        if subfolder is not None:
            dirname = os.path.join(output_dir, subfolder(structure))
        else:
            dirname = os.path.join(output_dir, '{}_{}'.format(formula, idx))
        if sanitize:
            structure = structure.copy(sanitize=True)
        input_set = vasp_input_set(structure, **kwargs)
        input_set.write_input(
            dirname, make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif)
|
xhqu1981/pymatgen
|
pymatgen/io/vasp/sets.py
|
Python
|
mit
| 58,868
|
[
"VASP",
"pymatgen"
] |
8c9f70f6e7d482d74e1facf70138e9411d97467ae7cf10038122bf1c83d4bbe5
|
# -*- coding: utf-8 -*-
#
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
#
# Common ARIMA functions
from sklearn.utils.validation import column_or_1d
import numpy as np
import warnings
from .. import context_managers as ctx
from ..utils import get_callable
from ..utils.array import diff, check_endog
from ..compat.numpy import DTYPE
from . import stationarity as statest_lib
from . import seasonality as seatest_lib
# Explicit public API of this module.
__all__ = [
    'is_constant',
    'ndiffs',
    'nsdiffs'
]
# Valid ``test`` values for ``ndiffs`` mapped to their stationarity
# test implementations.
VALID_TESTS = {
    'kpss': statest_lib.KPSSTest,
    'adf': statest_lib.ADFTest,
    'pp': statest_lib.PPTest
}
# Valid ``test`` values for ``nsdiffs`` mapped to their seasonality
# test implementations.
VALID_STESTS = {
    'ocsb': seatest_lib.OCSBTest,
    'ch': seatest_lib.CHTest
}
def is_constant(x):
    """Test ``x`` for constancy.

    Determine whether a vector is composed of all of the same elements
    and nothing else.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The time series vector.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1, 2, 3])
    >>> y = np.ones(3)
    >>> [is_constant(x), is_constant(y)]
    [False, True]
    """
    vec = column_or_1d(x)  # type: np.ndarray
    # Constant iff every element compares equal to the first one.
    first = vec[0]
    return (vec == first).all()
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs):
    """Estimate the seasonal differencing term, ``D``.

    Repeatedly apply a seasonal unit root test, seasonally differencing
    the series after each positive result, until the series is judged
    seasonally stationary, becomes constant, or ``max_D`` is reached.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.

    m : int
        The number of seasonal periods (i.e., frequency of the
        time series)

    max_D : int, optional (default=2)
        Maximum number of seasonal differences allowed. Must
        be a positive integer. The estimated value of ``D`` will not
        exceed ``max_D``.

    test : str, optional (default='ocsb')
        Type of unit root test of seasonality to use in order
        to detect seasonal periodicity. Valid tests include ("ocsb", "ch").
        Note that the CHTest is very slow for large data.

    Returns
    -------
    D : int
        The estimated seasonal differencing term. This is the maximum value
        of ``D`` such that ``D <= max_D`` and the time series is judged
        seasonally stationary. If the time series is constant, will return 0.
    """
    if max_D <= 0:
        raise ValueError('max_D must be a positive integer')

    # Resolve the seasonal test; instantiating it validates ``m`` internally.
    estimate = get_callable(test, VALID_STESTS)(
        m, **kwargs).estimate_seasonal_differencing_term

    x = check_endog(x, dtype=DTYPE, copy=False)

    # A constant vector requires no differencing at all.
    if is_constant(x):
        return 0

    D = 0
    while D < max_D and estimate(x) == 1:
        D += 1
        x = diff(x, lag=m)

        if is_constant(x):
            return D

        # Issue 351: if the differenced array is now shorter than the
        # seasonal periodicity, we need to bail out now.
        if len(x) < m:
            warnings.warn("Appropriate D value may not have been reached; "
                          "length of seasonally-differenced array (%i) is "
                          "shorter than m (%i). Using D=%i"
                          % (len(x), m, D))
            return D

    return D
def ndiffs(x, alpha=0.05, test='kpss', max_d=2, **kwargs):
    """Estimate ARIMA differencing term, ``d``.

    Perform a test of stationarity for different levels of ``d`` to
    estimate the number of differences required to make a given time
    series stationary. Will select the maximum value of ``d`` for which
    the time series is judged stationary by the statistical test.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array (time series) to difference.

    alpha : float, optional (default=0.05)
        Level of the test. This is the value below which the P-value
        will be deemed significant.

    test : str, optional (default='kpss')
        Type of unit root test of stationarity to use in order to
        test the stationarity of the time-series. One of ('kpss', 'adf', 'pp')

    max_d : int, optional (default=2)
        Maximum number of non-seasonal differences allowed. Must
        be a positive integer. The estimated value of ``d`` will not
        exceed ``max_d``.

    Returns
    -------
    d : int
        The estimated differencing term. This is the maximum value of ``d``
        such that ``d <= max_d`` and the time series is judged stationary.
        If the time series is constant, will return 0.

    References
    ----------
    .. [1] R's auto_arima ndiffs function
    https://github.com/robjhyndman/forecast/blob/19b0711e554524bf6435b7524517715658c07699/R/arima.R#L132 # noqa: E501
    """
    if max_d <= 0:
        raise ValueError('max_d must be a positive integer')
    # get the test (instantiating validates ``alpha`` and any extra kwargs)
    testfunc = get_callable(test, VALID_TESTS)(alpha, **kwargs).should_diff
    x = check_endog(x, dtype=DTYPE, copy=False)
    # base case, if constant return 0
    d = 0
    if is_constant(x):
        return d
    # Re-raise LinAlgErrors from the stationarity test as ValueErrors with a
    # friendlier, more actionable message (typically too few samples for OLS).
    with ctx.except_and_reraise(
        np.linalg.LinAlgError,
        raise_err=ValueError,
        raise_msg="Encountered exception in stationarity test (%r). "
                  "This can occur in seasonal settings when a large "
                  "enough `m` coupled with a large enough `D` difference "
                  "the training array into too few samples for OLS "
                  "(input contains %i samples). Try fitting on a larger "
                  "training size" % (test, len(x)),
    ):
        # get initial diff
        pval, dodiff = testfunc(x)
        # if initially NaN, return 0
        if np.isnan(pval):
            return 0  # (d is zero, but this is more explicit to the reader)
        # Begin loop: keep differencing while the test says to and d < max_d.
        while dodiff and d < max_d:
            d += 1
            # do differencing
            x = diff(x)
            if is_constant(x):
                return d
            # get new result
            pval, dodiff = testfunc(x)
            # if it's NaN now, take the last non-null one
            if np.isnan(pval):
                return d - 1
    # when d >= max_d
    return d
|
tgsmith61591/pyramid
|
pmdarima/arima/utils.py
|
Python
|
mit
| 6,450
|
[
"ADF"
] |
5ce17556bca869f8cc59407a9e202a5961c44134e328d7028e859d794ea5f5e6
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs cassandra.
Cassandra homepage: http://cassandra.apache.org
cassandra-stress tool page:
http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStress_t.html
"""
import collections
import functools
import logging
import math
import posixpath
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cassandra
# Default number of keys per core on the data nodes (used when --num_keys
# is unset).
NUM_KEYS_PER_CORE = 2000000
# Seconds to wait after preloading so the keyspace can propagate.
PROPAGATION_WAIT_TIME = 30
# cassandra-stress command
WRITE_COMMAND = 'write'
COUNTER_WRITE_COMMAND = 'counter_write'
USER_COMMAND = 'user'
READ_COMMAND = 'read'
COUNTER_READ_COMMAND = 'counter_read'
MIXED_COMMAND = 'mixed'
# Commands that read data and therefore require the database to be preloaded.
PRELOAD_REQUIRED = (READ_COMMAND, COUNTER_READ_COMMAND, MIXED_COMMAND)
# Command-line flags controlling the cassandra-stress workload.
# cassandra-stress command [options]
flags.DEFINE_enum('cassandra_stress_command', WRITE_COMMAND,
                  [WRITE_COMMAND,
                   COUNTER_WRITE_COMMAND,
                   USER_COMMAND,
                   READ_COMMAND,
                   COUNTER_READ_COMMAND,
                   MIXED_COMMAND],
                  'cassandra-stress command to use.')
flags.DEFINE_integer('cassandra_stress_preload_num_keys', None,
                     'Number of keys to preload into cassandra database. '
                     'Read/counter_read/mixed modes require preloading '
                     'cassandra database. If not set, the number of the keys '
                     'preloaded will be the same as --num_keys for '
                     'read/counter_read/mixed mode, the same as the number of '
                     'loaders for write/counter_write/user mode.')
# Options for cassandra-stress
flags.DEFINE_integer('num_keys', 0,
                     'Number of keys used in cassandra-stress tool across '
                     'all loader vms. If unset, this benchmark will use '
                     '%s * num_cpus on data nodes as the value.'
                     % NUM_KEYS_PER_CORE)
flags.DEFINE_integer('num_cassandra_stress_threads', 150,
                     'Number of threads used in cassandra-stress tool '
                     'on each loader node.')
flags.DEFINE_integer('cassandra_stress_replication_factor', 3,
                     'Number of replicas.')
flags.DEFINE_enum('cassandra_stress_consistency_level', 'QUORUM',
                  ['ONE', 'QUORUM', 'LOCAL_ONE', 'LOCAL_QUORUM',
                   'EACH_QUORUM', 'ALL', 'ANY'],
                  'Set the consistency level to use during cassandra-stress.')
flags.DEFINE_integer('cassandra_stress_retries', 1000,
                     'Number of retries when error encountered during stress.')
# Seed-selection distributions supported by cassandra-stress.
# Use "./cassandra-stress help -pop" to get more details.
# [dist=DIST(?)]: Seeds are selected from this distribution
#  EXP(min..max):
#      An exponential distribution over the range [min..max]
#  EXTREME(min..max,shape):
#      An extreme value (Weibull) distribution over the range [min..max]
#  QEXTREME(min..max,shape,quantas):
#      An extreme value, split into quantas, within which the chance of
#      selection is uniform
#  GAUSSIAN(min..max,stdvrng):
#      A gaussian/normal distribution, where mean=(min+max)/2, and stdev
#      is (mean-min)/stdvrng
#  GAUSSIAN(min..max,mean,stdev):
#      A gaussian/normal distribution, with explicitly defined mean and stdev
#  UNIFORM(min..max):
#      A uniform distribution over the range [min, max]
#  Preceding the name with ~ will invert the distribution,
#  e.g. ~EXP(1..10) will yield 10 most, instead of least, often.
flags.DEFINE_enum('cassandra_stress_population_distribution', None,
                  ['EXP', 'EXTREME', 'QEXTREME', 'GAUSSIAN', 'UNIFORM',
                   '~EXP', '~EXTREME', '~QEXTREME', '~GAUSSIAN', '~UNIFORM'],
                  'The population distribution cassandra-stress uses. '
                  'By default, each loader vm is given a range of keys '
                  '[min, max], and loaders will read/insert keys sequentially '
                  'from min to max.')
flags.DEFINE_integer('cassandra_stress_population_size', None,
                     'The size of the population across all clients. '
                     'By default, the size of the population equals to '
                     'max(num_keys,cassandra_stress_preload_num_keys).')
flags.DEFINE_list('cassandra_stress_population_parameters', [],
                  'Additional parameters to use with distribution. '
                  'This benchmark will calculate min, max for each '
                  'distribution. Some distributions need more parameters. '
                  'See: "./cassandra-stress help -pop" for more details. '
                  'Comma-separated list.')
# Options to use with cassandra-stress mixed mode, below flags only matter if
# --cassandra_stress_command=mixed.
flags.DEFINE_string('cassandra_stress_mixed_ratio', 'write=1,read=1',
                    'Read/write ratio of cassandra-stress. Only valid if '
                    '--cassandra_stress_command=mixed. By default, '
                    '50% read and 50% write.')
# Options to use with cassandra-stress user mode, below flags only matter if
# --cassandra_stress_command=user.
# http://www.datastax.com/dev/blog/improved-cassandra-2-1-stress-tool-benchmark-any-schema
flags.DEFINE_string('cassandra_stress_profile', '',
                    'Path to cassandra-stress profile file. '
                    'Only valid if --cassandra_stress_command=user.')
flags.DEFINE_string('cassandra_stress_operations', 'insert=1',
                    'Specify what operations (inserts and/or queries) to '
                    'run and the ratio of each operation. '
                    'Only valid if --cassandra_stress_command=user.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cassandra_stress'
BENCHMARK_CONFIG = """
cassandra_stress:
description: Benchmark Cassandra using cassandra-stress
vm_groups:
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 3
client:
vm_spec: *default_single_core
"""
CASSANDRA_GROUP = 'workers'
CLIENT_GROUP = 'client'
SLEEP_BETWEEN_CHECK_IN_SECONDS = 5
TEMP_PROFILE_PATH = posixpath.join(vm_util.VM_TMP_DIR, 'profile.yaml')
CASSANDRA_STRESS = posixpath.join(cassandra.CASSANDRA_DIR, 'tools', 'bin',
'cassandra-stress')
# Results documentation:
# http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStressOutput_c.html
RESULTS_METRICS = (
'op rate', # Number of operations per second performed during the run.
'partition rate', # Number of partition operations per second performed
# during the run.
'row rate', # Number of row operations per second performed during the run.
'latency mean', # Average latency in milliseconds for each operation during
# that run.
'latency median', # Median latency in milliseconds for each operation
# during that run.
'latency 95th percentile', # 95% of the time the latency was less than
# the number displayed in the column.
'latency 99th percentile', # 99% of the time the latency was less than
# the number displayed in the column.
'latency 99.9th percentile', # 99.9% of the time the latency was less than
# the number displayed in the column.
'latency max', # Maximum latency in milliseconds.
'Total partitions', # Number of partitions.
'Total errors', # Number of errors.
'Total operation time') # Total operation time.
# Metrics are aggregated between client vms.
AGGREGATED_METRICS = {'op rate', 'partition rate', 'row rate',
'Total partitions', 'Total errors'}
# Maximum value will be choisen between client vms.
MAXIMUM_METRICS = {'latency max'}
def GetConfig(user_config):
  """Merge user overrides into the default benchmark configuration."""
  benchmark_config = configs.LoadConfig(
      BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return benchmark_config
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  cassandra.CheckPrerequisites()
  if FLAGS.cassandra_stress_command == USER_COMMAND:
    # User mode requires a stress profile file; resolving the path raises
    # ResourceNotFound if the file does not exist.
    data.ResourcePath(FLAGS.cassandra_stress_profile)
def CheckMetadata(metadata):
  """Verify that metadata is valid.

  Args:
    metadata: dict. Contains metadata for this benchmark.

  Raises:
    errors.Benchmarks.PrepareException: If a preload-requiring mode is
        configured with fewer preloaded keys than the population size.
  """
  if metadata['command'] in PRELOAD_REQUIRED:
    if metadata['population_size'] > metadata['num_preload_keys']:
      # Interpolate with '%'; the original passed PRELOAD_REQUIRED as a
      # second constructor argument (logging-style), so the '%s' in the
      # message was never filled in.
      raise errors.Benchmarks.PrepareException(
          'For %s modes, number of preloaded keys must be larger than or '
          'equal to population size.' % (PRELOAD_REQUIRED,))
def GenerateMetadataFromFlags(benchmark_spec):
  """Generate metadata from command-line flags.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    dict. Contains metadata for this benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  metadata = {}
  # Default the total key count to a multiple of the data nodes' core count.
  if not FLAGS.num_keys:
    metadata['num_keys'] = (
        NUM_KEYS_PER_CORE * vm_dict[CASSANDRA_GROUP][0].num_cpus)
  else:
    metadata['num_keys'] = FLAGS.num_keys
  # Preload size precedence: explicit flag > num_keys (for read-style modes
  # that require preloading) > number of loader vms (write-style modes).
  if FLAGS['cassandra_stress_preload_num_keys'].present:
    metadata['num_preload_keys'] = FLAGS.cassandra_stress_preload_num_keys
  elif FLAGS.cassandra_stress_command in PRELOAD_REQUIRED:
    metadata['num_preload_keys'] = metadata['num_keys']
  else:
    metadata['num_preload_keys'] = len(vm_dict[CLIENT_GROUP])
  metadata.update({
      'num_data_nodes': len(vm_dict[CASSANDRA_GROUP]),
      'num_loader_nodes': len(vm_dict[CLIENT_GROUP]),
      'num_cassandra_stress_threads': FLAGS.num_cassandra_stress_threads,
      'command': FLAGS.cassandra_stress_command,
      'consistency_level': FLAGS.cassandra_stress_consistency_level,
      'retries': FLAGS.cassandra_stress_retries,
      'population_size': (FLAGS.cassandra_stress_population_size or
                          max(metadata['num_keys'],
                              metadata['num_preload_keys'])),
      'population_dist': FLAGS.cassandra_stress_population_distribution,
      'population_parameters': ','.join(
          FLAGS.cassandra_stress_population_parameters)})
  # User mode carries its own profile/operations; other modes carry the
  # replication factor (and, for mixed mode, the read/write ratio).
  if FLAGS.cassandra_stress_command == USER_COMMAND:
    metadata.update({
        'profile': FLAGS.cassandra_stress_profile,
        'operations': FLAGS.cassandra_stress_operations})
  else:
    if FLAGS.cassandra_stress_command == MIXED_COMMAND:
      metadata['mixed_ratio'] = FLAGS.cassandra_stress_mixed_ratio
    metadata['replication_factor'] = FLAGS.cassandra_stress_replication_factor
  logging.info('Metadata: %s', metadata)
  return metadata
def PreloadCassandraServer(benchmark_spec, metadata):
  """Preload cassandra cluster if necessary.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    metadata: dict. Contains metadata for this benchmark.
  """
  # Map each read-style command to its writing counterpart. Use the
  # module-level command constants (instead of repeating string literals)
  # so this mapping stays in sync with the flag definition above.
  if FLAGS.cassandra_stress_command in (READ_COMMAND, MIXED_COMMAND):
    cassandra_stress_command = WRITE_COMMAND
  elif FLAGS.cassandra_stress_command == COUNTER_READ_COMMAND:
    cassandra_stress_command = COUNTER_WRITE_COMMAND
  else:
    cassandra_stress_command = FLAGS.cassandra_stress_command
  logging.info('Preloading cassandra database with %s %s operations.',
               metadata['num_preload_keys'], cassandra_stress_command)
  RunCassandraStressTest(
      benchmark_spec.vm_groups[CASSANDRA_GROUP],
      benchmark_spec.vm_groups[CLIENT_GROUP],
      metadata['num_preload_keys'], cassandra_stress_command)
  logging.info('Waiting %s for keyspace to propagate.', PROPAGATION_WAIT_TIME)
  time.sleep(PROPAGATION_WAIT_TIME)
def Prepare(benchmark_spec):
  """Install Cassandra and Java on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_groups
  cassandra_vms = vm_dict[CASSANDRA_GROUP]
  logging.info('VM dictionary %s', vm_dict)
  logging.info('Authorizing loader[0] permission to access all other vms.')
  vm_dict[CLIENT_GROUP][0].AuthenticateVm()
  logging.info('Preparing data files and Java on all vms.')
  vm_util.RunThreaded(lambda vm: vm.Install('cassandra'), benchmark_spec.vms)
  # The first data node acts as the seed; every node is configured to point
  # at it, then the remaining nodes join the cluster.
  seed_vm = cassandra_vms[0]
  configure = functools.partial(cassandra.Configure, seed_vms=[seed_vm])
  vm_util.RunThreaded(configure, cassandra_vms)
  cassandra.StartCluster(seed_vm, cassandra_vms[1:])
  if FLAGS.cassandra_stress_command == USER_COMMAND:
    # User mode needs the stress profile available on every loader vm.
    for vm in vm_dict[CLIENT_GROUP]:
      vm.PushFile(FLAGS.cassandra_stress_profile,
                  TEMP_PROFILE_PATH)
  metadata = GenerateMetadataFromFlags(benchmark_spec)
  if metadata['num_preload_keys']:
    CheckMetadata(metadata)
    PreloadCassandraServer(benchmark_spec, metadata)
def _ResultFilePath(vm):
  """Return the remote path of the cassandra-stress log file for ``vm``."""
  filename = '%s.stress_results.txt' % vm.hostname
  return posixpath.join(vm_util.VM_TMP_DIR, filename)
def RunTestOnLoader(vm, loader_index, operations_per_vm, data_node_ips,
                    command, user_operations, population_per_vm,
                    population_dist, population_params):
  """Run Cassandra-stress test on loader node.

  Args:
    vm: The target vm.
    loader_index: integer. The index of target vm in loader vms.
    operations_per_vm: integer. The number of operations each loader vm
        requests.
    data_node_ips: list. List of IP addresses for all data nodes.
    command: string. The cassandra-stress command to use.
    user_operations: string. The operations to use with user mode.
    population_per_vm: integer. Population per loader vm.
    population_dist: string. The population distribution.
    population_params: string. Representing additional population parameters.
  """
  # Raw strings throughout: the backslashes escape parentheses for the
  # remote shell, not for Python. In plain strings '\(' is an invalid
  # escape sequence (a warning on Python >= 3.6); the bytes are unchanged.
  if command == USER_COMMAND:
    command += r' profile={profile} ops\({ops}\)'.format(
        profile=TEMP_PROFILE_PATH, ops=user_operations)
    schema_option = ''
  else:
    if command == MIXED_COMMAND:
      command += r' ratio\({ratio}\)'.format(
          ratio=FLAGS.cassandra_stress_mixed_ratio)
    # TODO: Support more complex replication strategy.
    schema_option = r'-schema replication\(factor={rf}\)'.format(
        rf=FLAGS.cassandra_stress_replication_factor)
  # Each loader works a disjoint, contiguous slice of the key population.
  population_range = '%s..%s' % (loader_index * population_per_vm + 1,
                                 (loader_index + 1) * population_per_vm)
  if population_params:
    population_params = '%s,%s' % (population_range, population_params)
  else:
    population_params = population_range
  if population_dist:
    population_dist = r'-pop dist=%s\(%s\)' % (population_dist,
                                               population_params)
  else:
    population_dist = '-pop seq=%s' % population_params
  vm.RobustRemoteCommand(
      '{cassandra} {command} cl={consistency_level} n={num_keys} '
      '-node {nodes} {schema} {population_dist} '
      '-log file={result_file} -rate threads={threads} '
      '-errors retries={retries}'.format(
          cassandra=CASSANDRA_STRESS,
          command=command,
          consistency_level=FLAGS.cassandra_stress_consistency_level,
          num_keys=operations_per_vm,
          nodes=','.join(data_node_ips),
          schema=schema_option,
          population_dist=population_dist,
          result_file=_ResultFilePath(vm),
          retries=FLAGS.cassandra_stress_retries,
          threads=FLAGS.num_cassandra_stress_threads))
def RunCassandraStressTest(cassandra_vms, loader_vms, num_operations,
                           command, profile_operations='insert=1',
                           population_size=None, population_dist=None,
                           population_params=None):
  """Start all loader nodes as Cassandra clients and run stress test.

  Args:
    cassandra_vms: list. A list of vm objects. Cassandra servers.
    loader_vms: list. A list of vm objects. Cassandra clients.
    num_operations: integer. The number of operations cassandra-stress
        clients should issue.
    command: string. The cassandra-stress command to use.
    profile_operations: string. The operations to use with user mode.
    population_size: integer. The population size.
    population_dist: string. The population distribution.
    population_params: string. Representing additional population parameters.
  """
  num_loaders = len(loader_vms)
  data_node_ips = [vm.internal_ip for vm in cassandra_vms]
  population_size = population_size or num_operations
  # Round the per-vm operation count up so the full workload is covered.
  operations_per_vm = int(math.ceil(float(num_operations) / num_loaders))
  # Floor division keeps this an integer on both Python 2 and 3; the value
  # is spliced into a "min..max" population range string per loader.
  population_per_vm = population_size // num_loaders
  if num_operations % num_loaders:
    logging.warning(
        'Total number of operations rounded to %s '
        '(%s operations per loader vm).',
        operations_per_vm * num_loaders, operations_per_vm)
  logging.info('Executing the benchmark.')
  # range (not the Python-2-only xrange) works on both interpreter versions.
  args = [((loader_vms[i], i, operations_per_vm, data_node_ips,
            command, profile_operations, population_per_vm,
            population_dist, population_params), {})
          for i in range(num_loaders)]
  vm_util.RunThreaded(RunTestOnLoader, args)
def CollectResultFile(vm, results):
  """Collect result file on vm.

  Args:
    vm: The target vm.
    results: A dictionary of lists. Each list contains results of a field
        defined in RESULTS_METRICS collected from each loader machines.
  """
  result_path = _ResultFilePath(vm)
  vm.PullFile(vm_util.GetTempDir(), result_path)
  # The summary block sits at the end of the stress log.
  output, _ = vm.RemoteCommand('tail -n 20 ' + result_path)
  for metric in RESULTS_METRICS:
    raw_value = regex_util.ExtractGroup(
        r'%s[\t ]+: ([\d\.:]+)' % metric, output)
    if metric == RESULTS_METRICS[-1]:  # Total operation time
      # Reported as HH:MM:SS; fold into seconds.
      parts = raw_value.split(':')
      total_seconds = int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
      results[metric].append(total_seconds)
    else:
      results[metric].append(float(raw_value))
def CollectResults(benchmark_spec, metadata):
  """Collect and parse test results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
    metadata: dict. Contains metadata for this benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  logging.info('Gathering results.')
  vm_dict = benchmark_spec.vm_groups
  loader_vms = vm_dict[CLIENT_GROUP]
  raw_results = collections.defaultdict(list)
  args = [((vm, raw_results), {}) for vm in loader_vms]
  vm_util.RunThreaded(CollectResultFile, args)
  results = []
  for metric in RESULTS_METRICS:
    if metric in MAXIMUM_METRICS:
      value = max(raw_results[metric])
    else:
      value = math.fsum(raw_results[metric])
      if metric not in AGGREGATED_METRICS:
        # Non-aggregated metrics (e.g. latencies) are averaged over loaders.
        value = value / len(loader_vms)
    # Reset the unit on every iteration. Previously it leaked from the prior
    # metric: e.g. 'Total partitions' inherited 'ms' from 'latency max'.
    unit = ''
    if metric.startswith('latency'):
      unit = 'ms'
    elif metric.endswith('rate'):
      unit = 'operations per second'
    elif metric == 'Total operation time':
      unit = 'seconds'
    results.append(sample.Sample(metric, value, unit, metadata))
  logging.info('Cassandra results:\n%s', results)
  return results
def Run(benchmark_spec):
  """Run Cassandra on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  metadata = GenerateMetadataFromFlags(benchmark_spec)
  # Drive the stress clients with the flag-derived settings, then parse the
  # per-loader result files into samples.
  RunCassandraStressTest(
      benchmark_spec.vm_groups[CASSANDRA_GROUP],
      benchmark_spec.vm_groups[CLIENT_GROUP],
      metadata['num_keys'],
      metadata['command'],
      metadata.get('operations'),
      metadata['population_size'],
      metadata['population_dist'],
      metadata['population_parameters'])
  return CollectResults(benchmark_spec, metadata)
def Cleanup(benchmark_spec):
  """Cleanup function.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  server_vms = benchmark_spec.vm_groups[CASSANDRA_GROUP]
  # First stop the Cassandra daemon on every node, then wipe each node's
  # data, preserving the original stop-all-before-clean ordering.
  for action in (cassandra.Stop, cassandra.CleanNode):
    vm_util.RunThreaded(action, server_vms)
|
akshara775/PerfKitBenchmarker-master-2
|
perfkitbenchmarker/linux_benchmarks/cassandra_stress_benchmark.py
|
Python
|
apache-2.0
| 20,941
|
[
"Gaussian"
] |
7f509f57e6be959aeb67aa04f11b8be1f54fa5f8e6c913a7c56d57ba476dc412
|
# coding: utf8
{
' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': ' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
' by ': ' by ',
' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.': ' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.',
' on ': ' on ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of Houses Damaged': '# of Houses Damaged',
'# of Houses Destroyed': '# of Houses Destroyed',
'# of People Affected': '# of People Affected',
'# of People Deceased': '# of People Deceased',
'# of People Injured': '# of People Injured',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
"'Sounds-like' name search allowing search even the spelling of the name is not known exactly": "'Sounds-like' name search allowing search even the spelling of the name is not known exactly",
'(Constraints Only)': '(Constraints Only)',
') & then click on the map below to adjust the Lat/Lon fields:': ') & then click on the map below to adjust the Lat/Lon fields:',
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK letters.': '1. Fill the necessary fields in BLOCK letters.',
'15-30 minutes': '15-30 minutes',
'2. Always use one box per letter and leave one box space to seperate words.': '2. Always use one box per letter and leave one box space to seperate words.',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 days',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'A file downloaded from a GPS containing a series of geographic points in XML format.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A place within a Site like a Shelf, room, bin number etc.': 'A place within a Site like a Shelf, room, bin number etc.',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.',
'A survey series with id %s does not exist. Please go back and create one.': 'A survey series with id %s does not exist. Please go back and create one.',
'ABOUT': 'ABOUT',
'ABOUT THIS MODULE': 'ABOUT THIS MODULE',
'ACCESS DATA': 'ACCESS DATA',
'API is documented here': 'API is documented here',
'Ability to Fill Out Surveys': 'Ability to Fill Out Surveys',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customize the list of human resource tracked at a Shelter',
'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customize the list of important facilities needed at a Shelter',
'Ability to track partial fulfillment of the request': 'Ability to track partial fulfillment of the request',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Ability to view Results of Completed and/or partially filled out Surveys',
'About': 'کے بارے میں',
'About Sahana': 'About Sahana',
'About Sahana Eden': 'About Sahana Eden',
'About this module': 'About this module',
'Access denied': 'رسائی نہیں ہوئی',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Acronym': 'Acronym',
'Actionable': 'Actionable',
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Active Problems': 'Active Problems',
'Activities': 'کاروائیاں',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Activity Details',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Updated': 'Activity Updated',
'Add': 'اضافہ کریں',
'Add Activity': 'Add Activity',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Add Activity Type',
'Add Address': 'Add Address',
'Add Aid Request': 'Add Aid Request',
'Add Assessment': 'Add Assessment',
'Add Bin Type': 'Add Bin Type',
'Add Bins': 'Add Bins',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Catalog': 'Add Catalog',
'Add Catalog.': 'Add Catalog.',
'Add Category': 'Add Category',
'Add Category<>Sub-Category<>Catalog Relation': 'Add Category<>Sub-Category<>Catalog Relation',
'Add Category<>Sub-Category<>Catalog Relation ': 'Add Category<>Sub-Category<>Catalog Relation ',
'Add Cluster': 'Add Cluster',
'Add Config': 'Add Config',
'Add Contact': 'Add Contact',
'Add Contact Information': 'Add Contact Information',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution': 'Add Distribution',
'Add Distribution.': 'Add Distribution.',
'Add Donor': 'Add Donor',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Add Feature Layer',
'Add Flood Report': 'Add Flood Report',
'Add Group': 'Add Group',
'Add Group Member': 'Add Group Member',
'Add Hospital': 'Add Hospital',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Add Identity',
'Add Image': 'Add Image',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Add Incident Report',
'Add Inventory Item': 'Add Inventory Item',
'Add Inventory Store': 'Add Inventory Store',
'Add Item': 'Add Item',
'Add Item (s)': 'Add Item (s)',
'Add Item Catalog': 'Add Item Catalog',
'Add Item Catalog ': 'Add Item Catalog ',
'Add Item Catalog Category ': 'Add Item Catalog Category ',
'Add Item Category': 'Add Item Category',
'Add Item Sub-Category': 'Add Item Sub-Category',
'Add Item Sub-Category ': 'Add Item Sub-Category ',
'Add Key': 'Add Key',
'Add Kit': 'Add Kit',
'Add Layer': 'Add Layer',
'Add Line': 'Add Line',
'Add Location': 'Add Location',
'Add Locations': 'Add Locations',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Add Member',
'Add Membership': 'Add Membership',
'Add Message': 'Add Message',
'Add Metadata': 'Add Metadata',
'Add New': 'Add New',
'Add New ': 'Add New ',
'Add New Activity': 'Add New Activity',
'Add New Address': 'Add New Address',
'Add New Aid Request': 'Add New Aid Request',
'Add New Assessment': 'Add New Assessment',
'Add New Bin': 'Add New Bin',
'Add New Bin Type': 'Add New Bin Type',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Cluster': 'Add New Cluster',
'Add New Config': 'Add New Config',
'Add New Contact': 'Add New Contact',
'Add New Distribution': 'Add New Distribution',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Document': 'Add New Document',
'Add New Donor': 'Add New Donor',
'Add New Entry': 'Add New Entry',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Add New Flood Report',
'Add New Group': 'Add New Group',
'Add New Hospital': 'Add New Hospital',
'Add New Identity': 'Add New Identity',
'Add New Image': 'Add New Image',
'Add New Incident': 'Add New Incident',
'Add New Incident Report': 'Add New Incident Report',
'Add New Inventory Item': 'Add New Inventory Item',
'Add New Inventory Store': 'Add New Inventory Store',
'Add New Item': 'Add New Item',
'Add New Item Catalog': 'Add New Item Catalog',
'Add New Item Catalog Category': 'Add New Item Catalog Category',
'Add New Item Category': 'Add New Item Category',
'Add New Item Sub-Category': 'Add New Item Sub-Category',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Key': 'Add New Key',
'Add New Kit': 'Add New Kit',
'Add New Layer': 'Add New Layer',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Add New Member',
'Add New Membership': 'Add New Membership',
'Add New Metadata': 'Add New Metadata',
'Add New Office': 'Add New Office',
'Add New Organization': 'Add New Organization',
'Add New Peer': 'Add New Peer',
'Add New Photo': 'Add New Photo',
'Add New Position': 'Add New Position',
'Add New Problem': 'Add New Problem',
'Add New Project': 'Add New Project',
'Add New Projection': 'Add New Projection',
'Add New Record': 'Add New Record',
'Add New Report': 'Add New Report',
'Add New Request': 'Add New Request',
'Add New Request Item': 'Add New Request Item',
'Add New Resource': 'Add New Resource',
'Add New Response': 'Add New Response',
'Add New River': 'Add New River',
'Add New Role': 'Add New Role',
'Add New Role to User': 'Add New Role to User',
'Add New School District': 'Add New School District',
'Add New School Report': 'Add New School Report',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Add New Shelter',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Site': 'Add New Site',
'Add New Skill': 'Add New Skill',
'Add New Skill Type': 'Add New Skill Type',
'Add New Solution': 'Add New Solution',
'Add New Source': 'Add New Source',
'Add New Staff': 'Add New Staff',
'Add New Staff Type': 'Add New Staff Type',
'Add New Storage Location': 'Add New Storage Location',
'Add New Survey Answer': 'Add New Survey Answer',
'Add New Survey Question': 'Add New Survey Question',
'Add New Survey Section': 'Add New Survey Section',
'Add New Survey Series': 'Add New Survey Series',
'Add New Survey Template': 'Add New Survey Template',
'Add New Task': 'Add New Task',
'Add New Team': 'Add New Team',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New Track': 'Add New Track',
'Add New Unit': 'Add New Unit',
'Add New User': 'Add New User',
'Add New User to Role': 'Add New User to Role',
'Add Office': 'Add Office',
'Add Organization': 'Add Organization',
'Add Peer': 'Add Peer',
'Add Person': 'Add Person',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Point': 'Add Point',
'Add Polygon': 'Add Polygon',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Add Project',
'Add Projections': 'Add Projections',
'Add Question': 'Add Question',
'Add Recipient': 'Add Recipient',
'Add Recipient Site': 'Add Recipient Site',
'Add Recipient Site.': 'Add Recipient Site.',
'Add Record': 'Add Record',
'Add Recovery Report': 'Add Recovery Report',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Request': 'Add Request',
'Add Request Detail': 'Add Request Detail',
'Add Request Item': 'Add Request Item',
'Add Resource': 'Add Resource',
'Add Response': 'Add Response',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add School District': 'Add School District',
'Add School Report': 'Add School Report',
'Add Section': 'Add Section',
'Add Sector': 'Add Sector',
'Add Sender Organization': 'Add Sender Organization',
'Add Sender Site': 'Add Sender Site',
'Add Sender Site.': 'Add Sender Site.',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Shipment Transit Log': 'Add Shipment Transit Log',
'Add Shipment/Way Bills': 'Add Shipment/Way Bills',
'Add Site': 'Add Site',
'Add Skill': 'Add Skill',
'Add Skill Type': 'Add Skill Type',
'Add Skill Types': 'Add Skill Types',
'Add Solution': 'Add Solution',
'Add Source': 'Add Source',
'Add Staff': 'Add Staff',
'Add Staff Type': 'Add Staff Type',
'Add Storage Bin ': 'Add Storage Bin ',
'Add Storage Bin Type': 'Add Storage Bin Type',
'Add Storage Location': 'Add Storage Location',
'Add Storage Location ': 'Add Storage Location ',
'Add Sub-Category': 'Add Sub-Category',
'Add Subscription': 'Add Subscription',
'Add Survey Answer': 'Add Survey Answer',
'Add Survey Question': 'Add Survey Question',
'Add Survey Section': 'Add Survey Section',
'Add Survey Series': 'Add Survey Series',
'Add Survey Template': 'Add Survey Template',
'Add Task': 'Add Task',
'Add Team': 'Add Team',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Unit': 'Add Unit',
'Add User': 'Add User',
'Add Volunteer Registration': 'Add Volunteer Registration',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a new Relief Item.': 'Add a new Relief Item.',
'Add a new Site from where the Item is being sent.': 'Add a new Site from where the Item is being sent.',
'Add a new Site where the Item is being sent to.': 'Add a new Site where the Item is being sent to.',
'Add an Photo.': 'Add an Photo.',
'Add main Item Category.': 'Add main Item Category.',
'Add main Item Sub-Category.': 'Add main Item Sub-Category.',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new person.': 'Add new person.',
'Add new position.': 'Add new position.',
'Add new project.': 'Add new project.',
'Add new staff role.': 'Add new staff role.',
'Add the Storage Bin Type.': 'Add the Storage Bin Type.',
'Add the Storage Location where this bin is located.': 'Add the Storage Location where this bin is located.',
'Add the Storage Location where this this Bin belongs to.': 'Add the Storage Location where this this Bin belongs to.',
'Add the main Warehouse/Site information where this Bin belongs to.': 'Add the main Warehouse/Site information where this Bin belongs to.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Add the main Warehouse/Site information where this Item is to be added.',
'Add the main Warehouse/Site information where this Storage location is.': 'Add the main Warehouse/Site information where this Storage location is.',
'Add the unit of measure if it doesnt exists already.': 'Add the unit of measure if it doesnt exists already.',
'Add to Bundle': 'Add to Bundle',
'Add to Catalog': 'Add to Catalog',
'Add to budget': 'Add to budget',
'Add/Edit/Remove Layers': 'Add/Edit/Remove Layers',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Comments': 'Additional Comments',
'Additional quantity quantifier – i.e. “4x5”.': 'Additional quantity quantifier – i.e. “4x5”.',
'Address': 'Address',
'Address Details': 'Address Details',
'Address Type': 'Address Type',
'Address added': 'Address added',
'Address deleted': 'Address deleted',
'Address updated': 'Address updated',
'Addresses': 'Addresses',
'Adequate': 'Adequate',
'Adequate food and water available': 'Adequate food and water available',
'Adjust Item(s) Quantity': 'Adjust Item(s) Quantity',
'Adjust Items due to Theft/Loss': 'Adjust Items due to Theft/Loss',
'Admin': 'Admin',
'Admin Email': 'Admin Email',
'Admin Name': 'Admin Name',
'Admin Tel': 'Admin Tel',
'Administration': 'Administration',
'Administrator': 'Administrator',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Adult Psychiatric',
'Adult female': 'Adult female',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced Bin Search': 'Advanced Bin Search',
'Advanced Catalog Search': 'Advanced Catalog Search',
'Advanced Category Search': 'Advanced Category Search',
'Advanced Item Search': 'Advanced Item Search',
'Advanced Location Search': 'Advanced Location Search',
'Advanced Site Search': 'Advanced Site Search',
'Advanced Sub-Category Search': 'Advanced Sub-Category Search',
'Advanced Unit Search': 'Advanced Unit Search',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'Affectees Families settled in the school belong to district': 'Affectees Families settled in the school belong to district',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Age Group',
'Age group': 'Age group',
'Age group does not match actual age.': 'Age group does not match actual age.',
'Aggregate Items': 'Aggregate Items',
'Agriculture': 'Agriculture',
'Aid Request': 'Aid Request',
'Aid Request Details': 'Aid Request Details',
'Aid Request added': 'Aid Request added',
'Aid Request deleted': 'Aid Request deleted',
'Aid Request updated': 'Aid Request updated',
'Aid Requests': 'Aid Requests',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Airport Closure',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All Locations': 'All Locations',
'All Pledges': 'All Pledges',
'All Requested Items': 'All Requested Items',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.',
'Allowed to push': 'Allowed to push',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Allows authorized users to control which layers are available to the situation map.': 'Allows authorized users to control which layers are available to the situation map.',
'Alternative infant nutrition in use': 'Alternative infant nutrition in use',
'Alternative places for studying': 'Alternative places for studying',
'Alternative places for studying available': 'Alternative places for studying available',
'Ambulance Service': 'Ambulance Service',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': 'An Inventory Store is a physical place which contains Relief Items available to be Distributed.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.',
'Analysis of Completed Surveys': 'Analysis of Completed Surveys',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Animals': 'Animals',
'Answer Choices (One Per Line)': 'Answer Choices (One Per Line)',
'Anthropolgy': 'Anthropolgy',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.',
'Any comments about this sync partner.': 'Any comments about this sync partner.',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Appropriate clothing available': 'Appropriate clothing available',
'Appropriate cooking equipment/materials in HH': 'Appropriate cooking equipment/materials in HH',
'Approx. number of cases/48h': 'Approx. number of cases/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Approximately how many children under 5 with diarrhea in the past 48 hours?',
'Archive not Delete': 'Archive not Delete',
'Arctic Outflow': 'Arctic Outflow',
'Are basic medical supplies available for health services since the disaster?': 'Are basic medical supplies available for health services since the disaster?',
'Are breast milk substitutes being used here since the disaster?': 'Are breast milk substitutes being used here since the disaster?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': 'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?',
'Are the chronically ill receiving sufficient care and assistance?': 'Are the chronically ill receiving sufficient care and assistance?',
'Are there adults living in prisons in this area?': 'Are there adults living in prisons in this area?',
'Are there alternative places for studying?': 'Are there alternative places for studying?',
'Are there cases of diarrhea among children under the age of 5?': 'Are there cases of diarrhea among children under the age of 5?',
'Are there children living in adult prisons in this area?': 'Are there children living in adult prisons in this area?',
'Are there children living in boarding schools in this area?': 'Are there children living in boarding schools in this area?',
'Are there children living in homes for disabled children in this area?': 'Are there children living in homes for disabled children in this area?',
'Are there children living in juvenile detention in this area?': 'Are there children living in juvenile detention in this area?',
'Are there children living in orphanages in this area?': 'Are there children living in orphanages in this area?',
'Are there children with chronical illnesses in your community?': 'Are there children with chronical illnesses in your community?',
'Are there health services functioning for the community since the disaster?': 'Are there health services functioning for the community since the disaster?',
'Are there older people living in care homes in this area?': 'Are there older people living in care homes in this area?',
'Are there older people with chronical illnesses in your community?': 'Are there older people with chronical illnesses in your community?',
'Are there people with chronical illnesses in your community?': 'Are there people with chronical illnesses in your community?',
'Are there separate latrines for women and men available?': 'Are there separate latrines for women and men available?',
'Are there staff present and caring for the residents in these institutions?': 'Are there staff present and caring for the residents in these institutions?',
'Area': 'Area',
'Assessment': 'Assessment',
'Assessment Details': 'Assessment Details',
'Assessment Type': 'Assessment Type',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Assessment updated',
'Assessments': 'تجزیے',
'Assessments are structured reports done by Professional Organizations': 'Assessments are structured reports done by Professional Organizations',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments',
'Assign Storage Location': 'Assign Storage Location',
'Assigned': 'Assigned',
'Assigned To': 'Assigned To',
'Assigned to': 'Assigned to',
'Assistance for immediate repair/reconstruction of houses': 'Assistance for immediate repair/reconstruction of houses',
'Assistant': 'Assistant',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Audit Read': 'Audit Read',
'Audit Write': 'Audit Write',
'Author': 'Author',
'Author: ': 'Author: ',
'Automotive': 'Automotive',
'Available Beds': 'Available Beds',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Available until',
'Availablity': 'Availablity',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Babies who are not being breastfed, what are they being fed on?': 'Babies who are not being breastfed, what are they being fed on?',
'Baby And Child Care': 'Baby And Child Care',
'Background Colour': 'Background Colour',
'Background Colour for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Balochi': 'Balochi',
'Banana': 'Banana',
'Bank/micro finance': 'Bank/micro finance',
'Base Layer?': 'Base Layer?',
'Base Layers': 'Base Layers',
'Base Unit': 'Base Unit',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Basic Details': 'Basic Details',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Basic information on the requests and donations, such as category, the units, contact details and the status.',
'Basic medical supplies available prior to disaster': 'Basic medical supplies available prior to disaster',
'Basic medical supplies available since disaster': 'Basic medical supplies available since disaster',
'Basic reports on the Shelter and drill-down by region': 'Basic reports on the Shelter and drill-down by region',
'Baud': 'Baud',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Bed Type',
'Bedding materials available': 'Bedding materials available',
'Beneficiary Type': 'Beneficiary Type',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'بسکٹ',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Blowing Snow',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery Reports': 'Body Recovery Reports',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Bomb Explosion',
'Bomb Threat': 'Bomb Threat',
'Border Colour for Text blocks': 'Border Colour for Text blocks',
'Boys 13-18 yrs in affected area': 'Boys 13-18 yrs in affected area',
'Boys 13-18 yrs not attending school': 'Boys 13-18 yrs not attending school',
'Boys 6-12 yrs in affected area': 'Boys 6-12 yrs in affected area',
'Boys 6-12 yrs not attending school': 'Boys 6-12 yrs not attending school',
'Breast milk substitutes in use since disaster': 'Breast milk substitutes in use since disaster',
'Breast milk substitutes used prior to disaster': 'Breast milk substitutes used prior to disaster',
'Bricks': 'اینٹوں',
'Bridge Closed': 'Bridge Closed',
'Brief Assessments': 'Brief Assessments',
'Bucket': 'Bucket',
'Buddhist': 'Buddhist',
'Budget': 'Budget',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Budgets',
'Buffer': 'Buffer',
'Building Aide': 'Building Aide',
'Building Collapsed': 'Building Collapsed',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Bundle Updated',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'Business damaged': 'Business damaged',
'CBA Women': 'CBA Women',
'CSS file %s not writable - unable to apply theme!': 'CSS file %s not writable - unable to apply theme!',
'Calculate': 'Calculate',
'Can be grouped together into Feature Groups': 'Can be grouped together into Feature Groups',
'Can users register themselves for authenticated login access?': 'Can users register themselves for authenticated login access?',
'Cancel': 'منسوخ',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'Cannot delete whilst there are linked records. Please delete linked records first.',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capacity (W x D X H)': 'Capacity (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturing organizational information of a relief organization and all the projects they have in the region',
'Capturing the essential services each Volunteer is providing and where': 'Capturing the essential services each Volunteer is providing and where',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organization is providing and where',
'Cardiology': 'Cardiology',
'Cash available to restart business': 'Cash available to restart business',
'Cassava': 'Cassava',
'Casual Labor': 'Casual Labor',
'Catalog': 'Catalog',
'Catalog Name': 'Catalog Name',
'Category': 'زمرہ',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog Relation',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog Relation added',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog Relation deleted',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog Relation updated',
'Central point to record details on People': 'Central point to record details on People',
'Change Password': 'پاس ورڈ تبدیل کریں',
'Check for errors in the URL, maybe the address was mistyped.': 'Check for errors in the URL, maybe the address was mistyped.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Check if the URL is pointing to a directory instead of a webpage.',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'چکن',
'Child': 'بال',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Children (< 2 years)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Children with chronical illnesses': 'Children with chronical illnesses',
'Chinese': 'Chinese',
'Chinese (Taiwan)': 'Chinese (Taiwan)',
'Choosing Skill and Resources of Volunteers': 'Choosing Skill and Resources of Volunteers',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.',
'Civil Emergency': 'Civil Emergency',
'Clear Selection': 'Clear Selection',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.",
'Click on the link ': 'Click on the link ',
'Client IP': 'Client IP',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Closed': 'بند کر دیا گیا',
'Closure': 'Closure',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Cluster added',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'کوڈ',
'Cold Wave': 'Cold Wave',
'Collective center': 'Collective center',
'Colour for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Colour of Buttons when hovering': 'Colour of Buttons when hovering',
'Colour of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Colour of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Colour of dropdown menus': 'Colour of dropdown menus',
'Colour of selected Input fields': 'Colour of selected Input fields',
'Colour of selected menu items': 'Colour of selected menu items',
'Column Choices (One Per Line': 'Column Choices (One Per Line',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'تبصرے',
'Commiting a changed spreadsheet to the database': 'Commiting a changed spreadsheet to the database',
'Community Centre': 'Community Centre',
'Community Health Center': 'Community Health Center',
'Community Member': 'Community Member',
'Complete Unit Label for e.g. meter for m.': 'Complete Unit Label for e.g. meter for m.',
'Completed': 'Completed',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Config': 'Config',
'Config added': 'Config added',
'Config deleted': 'Config deleted',
'Config updated': 'Config updated',
'Configs': 'Configs',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Confirmed': 'Confirmed',
'Confirmed Incidents': 'Confirmed Incidents',
'Conflict Resolution': 'Conflict Resolution',
'Consumable': 'Consumable',
'Contact': 'رابطہ کریں',
'Contact Data': 'Contact Data',
'Contact Details': 'Contact Details',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Person': 'Contact Person',
'Contact details': 'Contact details',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact us': 'ہم سے رابطہ کریں',
'Contacts': 'رابطہ',
'Contents': 'فہرست',
'Contradictory values!': 'Contradictory values!',
'Contributor': 'Contributor',
'Conversion Tool': 'Conversion Tool',
'Cooking Oil': 'کوکنگ آئل',
'Coordinate Conversion': 'Coordinate Conversion',
'Copy': 'کاپی',
'Copy any data from the one to be deleted into the one to keep': 'Copy any data from the one to be deleted into the one to keep',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
'Country': 'ملک',
'Country of Residence': 'رہائش گاہ کے ملک',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Import Job': 'Create Import Job',
'Create New Import Job': 'Create New Import Job',
'Create Request': 'Create Request',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Creation of Surveys': 'Creation of Surveys',
'Crime': 'Crime',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Group Members': 'Current Group Members',
'Current Identities': 'Current Identities',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Current Memberships',
'Current Registrations': 'Current Registrations',
'Current Team Members': 'Current Team Members',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current main income sources': 'Current main income sources',
'Current major expenses': 'Current major expenses',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Current type of health problems, adults': 'Current type of health problems, adults',
'Current type of health problems, children': 'Current type of health problems, children',
'Current type of source for drinking water': 'Current type of source for drinking water',
'Current type of source for sanitary water': 'Current type of source for sanitary water',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Custom Database Resource (e.g., anything defined as a resource in Sahana)',
'Customisable category of aid': 'Customisable category of aid',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'Daily': 'Daily',
'Dam Overflow': 'Dam Overflow',
'Dangerous Person': 'Dangerous Person',
'Dashboard': 'Dashboard',
'Data import policy': 'Data import policy',
'Data uploaded': 'Data uploaded',
'Database': 'Database',
'Date': 'تاریخ',
'Date & Time': 'تاریخ اور وقت',
'Date and Time': 'تاریخ اور وقت',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Date of Report',
'Date/Time': 'Date/Time',
'Date/Time of Find': 'Date/Time of Find',
'Date/Time of disappearance': 'Date/Time of disappearance',
'De-duplicator': 'De-duplicator',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Dead Body Reports',
'Deaths/24hrs': 'Deaths/24hrs',
'Debug': 'Debug',
'Decimal Degrees': 'Decimal Degrees',
'Decomposed': 'Decomposed',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default Marker': 'Default Marker',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default synchronization policy': 'Default synchronization policy',
'Defaults': 'Defaults',
'Defaults updated': 'Defaults updated',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Degrees must be between -180 and 180': 'Degrees must be between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'خارج کر دیں',
'Delete Aid Request': 'Delete Aid Request',
'Delete Assessment': 'Delete Assessment',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Config': 'Delete Config',
'Delete Distribution': 'Delete Distribution',
'Delete Distribution Item': 'Delete Distribution Item',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Group': 'Delete Group',
'Delete Hospital': 'Delete Hospital',
'Delete Image': 'Delete Image',
'Delete Incident': 'Delete Incident',
'Delete Incident Report': 'Delete Incident Report',
'Delete Inventory Item': 'Delete Inventory Item',
'Delete Inventory Store': 'Delete Inventory Store',
'Delete Item': 'Delete Item',
'Delete Item Category': 'Delete Item Category',
'Delete Key': 'Delete Key',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Delete Layer',
'Delete Location': 'Delete Location',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Metadata': 'Delete Metadata',
'Delete Office': 'Delete Office',
'Delete Old': 'Delete Old',
'Delete Organization': 'Delete Organization',
'Delete Peer': 'Delete Peer',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Record': 'Delete Record',
'Delete Recovery Report': 'Delete Recovery Report',
'Delete Report': 'Delete Report',
'Delete Request': 'Delete Request',
'Delete Request Item': 'Delete Request Item',
'Delete Resource': 'Delete Resource',
'Delete Section': 'Delete Section',
'Delete Service Profile': 'Delete Service Profile',
'Delete Setting': 'Delete Setting',
'Delete Skill': 'Delete Skill',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Subscription': 'Delete Subscription',
'Delete Survey Answer': 'Delete Survey Answer',
'Delete Survey Question': 'Delete Survey Question',
'Delete Survey Section': 'Delete Survey Section',
'Delete Survey Series': 'Delete Survey Series',
'Delete Survey Template': 'Delete Survey Template',
'Delete Unit': 'Delete Unit',
'Delete User': 'Delete User',
'Delete from Server?': 'Delete from Server?',
'Delivered': 'Delivered',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'شماریات آبادی',
'Demonstrations': 'Demonstrations',
'Dental Examination': 'Dental Examination',
'Dental Profile': 'Dental Profile',
'Department/Unit Name': 'Department/Unit Name',
'Deployment': 'Deployment',
'Describe the condition of the roads to your hospital.': 'Describe the condition of the roads to your hospital.',
'Description': 'وضاحت',
'Description of Bin Type': 'Description of Bin Type',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Description of defecation area',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Descriptive Text (e.g., Prose, etc)': 'Descriptive Text (e.g., Prose, etc)',
'Designated for': 'Designated for',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'مطلوبہ',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.",
'Details': 'تفصیلات',
'Dialysis': 'Dialysis',
'Diarrhea': 'Diarrhea',
'Diarrhea among children under 5': 'Diarrhea among children under 5',
'Dignitary Visit': 'Dignitary Visit',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Disabled': 'Disabled',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Dispatch': 'Dispatch',
'Dispatch Items': 'Dispatch Items',
'Dispensary': 'Dispensary',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display Polygons?': 'Display Polygons?',
'Dispose': 'Dispose',
'Dispose Expired/Unusable Items': 'Dispose Expired/Unusable Items',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance between latrines and temporary shelter in meters': 'Distance between latrines and temporary shelter in meters',
'Distance between shelter and latrines': 'Distance between shelter and latrines',
'Distribution': 'Distribution',
'Distribution Details': 'Distribution Details',
'Distribution Item': 'Distribution Item',
'Distribution Item Details': 'Distribution Item Details',
'Distribution Item added': 'Distribution Item added',
'Distribution Item deleted': 'Distribution Item deleted',
'Distribution Item updated': 'Distribution Item updated',
'Distribution Items': 'Distribution Items',
'Distribution added': 'Distribution added',
'Distribution deleted': 'Distribution deleted',
'Distribution groups': 'Distribution groups',
'Distribution updated': 'Distribution updated',
'Distributions': 'ڈسٹری',
'District': 'ضلع',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': 'Do households each have at least 2 containers (10-20 litres each) to hold water?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Do households have bedding materials available (tarps, plastic mats, blankets)?',
'Do households have household water storage containers?': 'Do households have household water storage containers?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': 'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': 'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do women and girls have easy access to sanitary materials?': 'Do women and girls have easy access to sanitary materials?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do you have access to cash to restart your business?': 'Do you have access to cash to restart your business?',
'Do you know of any incidents of violence?': 'Do you know of any incidents of violence?',
'Do you know of children living on their own (without adults)?': 'Do you know of children living on their own (without adults)?',
'Do you know of children separated from their parents or caregivers?': 'Do you know of children separated from their parents or caregivers?',
'Do you know of children that have been orphaned by the disaster?': 'Do you know of children that have been orphaned by the disaster?',
'Do you know of children that have been sent to safe places?': 'Do you know of children that have been sent to safe places?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Do you know of children that have disappeared without explanation in the period since the disaster?',
'Do you know of older people who are primary caregivers of children?': 'Do you know of older people who are primary caregivers of children?',
'Do you know of parents/caregivers missing children?': 'Do you know of parents/caregivers missing children?',
'Do you want to over-write the file metadata with new default values?': 'Do you want to over-write the file metadata with new default values?',
'Document': 'دستاویز',
'Document Details': 'Document Details',
'Document Scan': 'Document Scan',
'Document added': 'Document added',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Documents': 'دستاویزات',
'Documents and Photos': 'دستاویزات اور تصاویر',
'Doing nothing (no structured activity)': 'Doing nothing (no structured activity)',
'Dollars': 'Dollars',
'Domestic chores': 'Domestic chores',
'Donation Phone #': 'Donation Phone #',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Donor added',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Donor updated',
'Donors': 'Donors',
'Donors Report': 'Donors Report',
'Door frame': 'Door frame',
'Draft': 'Draft',
'Draft Features': 'Draft Features',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'فارمیسی',
'Dug Well': 'Dug Well',
'Dust Storm': 'Dust Storm',
'Dwellings': 'Dwellings',
'E-mail': 'ای میل',
'EMS Reason': 'EMS Reason',
'EMS Status': 'EMS Status',
'EMS Status Reason': 'EMS Status Reason',
'EMS Traffic Status': 'EMS Traffic Status',
'Earthquake': 'Earthquake',
'Easy access to sanitation items for women/girls': 'Easy access to sanitation items for women/girls',
'Edit': 'Edit',
'Edit ': 'Edit ',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Aid Request': 'Edit Aid Request',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Edit Assessment',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Category<>Sub-Category<>Catalog Relation': 'Edit Category<>Sub-Category<>Catalog Relation',
'Edit Cluster': 'Edit Cluster',
'Edit Config': 'Edit Config',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Edit Contact Information',
'Edit Contents': 'Edit Contents',
'Edit Defaults': 'Edit Defaults',
'Edit Description': 'Edit Description',
'Edit Details': 'Edit Details',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Distribution': 'Edit Distribution',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Document': 'Edit Document',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit Gateway Settings': 'Edit Gateway Settings',
'Edit Group': 'Edit Group',
'Edit Hospital': 'Edit Hospital',
'Edit Identification Report': 'Edit Identification Report',
'Edit Identity': 'Edit Identity',
'Edit Image': 'Edit Image',
'Edit Image Details': 'Edit Image Details',
'Edit Incident': 'Edit Incident',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Inventory Store': 'Edit Inventory Store',
'Edit Item': 'Edit Item',
'Edit Item Catalog': 'Edit Item Catalog',
'Edit Item Catalog Categories': 'Edit Item Catalog Categories',
'Edit Item Category': 'Edit Item Category',
'Edit Item Sub-Categories': 'Edit Item Sub-Categories',
'Edit Key': 'Edit Key',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Services': 'Edit Map Services',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Messaging Settings': 'Edit Messaging Settings',
'Edit Metadata': 'Edit Metadata',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Office': 'Edit Office',
'Edit Organization': 'Edit Organization',
'Edit Parameters': 'Edit Parameters',
'Edit Peer': 'Edit Peer',
'Edit Peer Details': 'Edit Peer Details',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Pledge': 'Edit Pledge',
'Edit Position': 'Edit Position',
'Edit Problem': 'Edit Problem',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Record': 'Edit Record',
'Edit Recovery Details': 'Edit Recovery Details',
'Edit Registration': 'Edit Registration',
'Edit Registration Details': 'Edit Registration Details',
'Edit Report': 'Edit Report',
'Edit Request Item': 'Edit Request Item',
'Edit Resource': 'Edit Resource',
'Edit Response': 'Edit Response',
'Edit River': 'Edit River',
'Edit Role': 'Edit Role',
'Edit School District': 'Edit School District',
'Edit School Report': 'Edit School Report',
'Edit Setting': 'Edit Setting',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Edit Shelter',
'Edit Shelter Service': 'Edit Shelter Service',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shipment Transit Log': 'Edit Shipment Transit Log',
'Edit Shipment/Way Bills': 'Edit Shipment/Way Bills',
'Edit Shipment<>Item Relation': 'Edit Shipment<>Item Relation',
'Edit Site': 'Edit Site',
'Edit Skill': 'Edit Skill',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Source': 'Edit Source',
'Edit Staff': 'Edit Staff',
'Edit Staff Type': 'Edit Staff Type',
'Edit Storage Bin Type(s)': 'Edit Storage Bin Type(s)',
'Edit Storage Bins': 'Edit Storage Bins',
'Edit Storage Location': 'Edit Storage Location',
'Edit Subscription': 'Edit Subscription',
'Edit Survey Answer': 'Edit Survey Answer',
'Edit Survey Question': 'Edit Survey Question',
'Edit Survey Section': 'Edit Survey Section',
'Edit Survey Series': 'Edit Survey Series',
'Edit Survey Template': 'Edit Survey Template',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Track': 'Edit Track',
'Edit Unit': 'Edit Unit',
'Edit User': 'Edit User',
'Edit Volunteer Registration': 'Edit Volunteer Registration',
'Edit current record': 'Edit current record',
'Edit message': 'Edit message',
'Edit the Application': 'Edit the Application',
'Editable?': 'Editable?',
'Education': 'تعلیم',
'Education materials received': 'Education materials received',
'Education materials, source': 'Education materials, source',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'انڈے',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or image URL required.': 'Either file upload or image URL required.',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Electricity': 'Electricity',
'Elevated': 'Elevated',
'Email': 'ای میل',
'Email Settings': 'Email Settings',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Email address verified, however registration is still pending approval - please wait until confirmation received.',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Emergency Department',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Enable/Disable Layers': 'Enable/Disable Layers',
'Enabled': 'چالو',
'End date': 'End date',
'End of Period': 'End of Period',
'English': 'English',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a date before': 'Enter a date before',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': 'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Enter a name for the spreadsheet you are uploading (mandatory).',
'Enter a new support request.': 'Enter a new support request.',
'Enter a summary of the request here.': 'Enter a summary of the request here.',
'Enter a unique label!': 'Enter a unique label!',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Enter your firstname',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.',
'Equipment': 'آلات',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Errors': 'Errors',
'Estimated # of households who are affected by the emergency': 'Estimated # of households who are affected by the emergency',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Euros': 'Euros',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)',
'Event Time': 'Event Time',
'Event Type': 'Event Type',
'Event type': 'Event type',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Existing food stocks, main dishes': 'Existing food stocks, main dishes',
'Existing food stocks, side dishes': 'Existing food stocks, side dishes',
'Expected In': 'Expected In',
'Expected Out': 'Expected Out',
'Expiry Time': 'Expiry Time',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'External Features': 'External Features',
'Eye Color': 'Eye Color',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hair, length',
"Facilitate uploading of missing person's photograph": "Facilitate uploading of missing person's photograph",
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Factors affecting school attendance': 'Factors affecting school attendance',
'Failed!': 'Failed!',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'خاندانی',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Family/friends',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fax': 'Fax',
'Feature': 'Feature',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Feature Classes',
'Feature Classes are collections of Locations (Features) of the same type': 'Feature Classes are collections of Locations (Features) of the same type',
'Feature Classes to Feature Groups': 'Feature Classes to Feature Groups',
'Feature Groups': 'Feature Groups',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Feature Layer added',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Feature Layer updated',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Type': 'Feature Type',
'Features Include': 'Features Include',
'Female': 'عورت',
'Female headed households': 'Female headed households',
'Few': 'کچھ',
'Field Hospital': 'Field Hospital',
'File': 'File',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Filter Value',
'Filtered search of aid pledges and requests': 'Filtered search of aid pledges and requests',
'Find': 'تلاش کریں',
'Find Recovery Report': 'Find Recovery Report',
'Find by Name': 'Find by Name',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Fingerprinting',
'Fingerprints': 'Fingerprints',
'Finish': 'Finish',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'پہلا نام',
'First name': 'پہلا نام',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Fleet Management': 'Fleet Management',
'Flood': 'Flood',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Flood Report Details',
'Flood Report added': 'Flood Report added',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Focal Point': 'Focal Point',
'Fog': 'Fog',
'Food': 'خوراک',
'Food assistance available/expected': 'Food assistance available/expected',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'For Eden instances enter the application base URL, e.g. http://sync.sahanafoundation.org/eden, for other peers the URL of the synchronization interface.',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For more information, see ': 'For more information, see ',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Formal camp',
'Format': 'Format',
'Forms': 'Forms',
'Found': 'Found',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'Frost': 'Frost',
'Fuel': 'Fuel',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functional Tests': 'Functional Tests',
'Functions available': 'Functions available',
'Funding Organization': 'Funding Organization',
'Funeral': 'Funeral',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Marker': 'GPS Marker',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPX Track': 'GPX Track',
'Gale Wind': 'Gale Wind',
'Gateway Settings': 'Gateway Settings',
'Gateway settings updated': 'Gateway settings updated',
'Gender': 'Gender',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'Generator': 'Generator',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo module not available within the running Python - this needs installing for PDF output!',
'Girls 13-18 yrs in affected area': 'Girls 13-18 yrs in affected area',
'Girls 13-18 yrs not attending school': 'Girls 13-18 yrs not attending school',
'Girls 6-12 yrs in affected area': 'Girls 6-12 yrs in affected area',
'Girls 6-12 yrs not attending school': 'Girls 6-12 yrs not attending school',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen the person': 'Give information about where and when you have seen the person',
'Global Messaging Settings': 'Global Messaging Settings',
'Goatee': 'Goatee',
'Google Group': 'Google Group',
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Group': 'Group',
'Group Details': 'Group Details',
'Group ID': 'Group ID',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Title': 'Group Title',
'Group Type': 'Group Type',
'Group added': 'Group added',
'Group deleted': 'Group deleted',
'Group description': 'Group description',
'Group name': 'Group name',
'Group type': 'Group type',
'Group updated': 'Group updated',
"Grouping by 'Family Unit' or other group category": "Grouping by 'Family Unit' or other group category",
'Groups': 'گروپ',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Length': 'Hair Length',
'Hair Style': 'Hair Style',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Has the safety and security of women and children in your community changed since the emergency?': 'Has the safety and security of women and children in your community changed since the emergency?',
'Has your business been damaged in the course of the disaster?': 'Has your business been damaged in the course of the disaster?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': 'Have households received any shelter/NFI assistance or is assistance expected in the coming days?',
'Have normal food sources been disrupted?': 'Have normal food sources been disrupted?',
'Have schools received or are expecting to receive any assistance?': 'Have schools received or are expecting to receive any assistance?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': 'Have the people received or are you expecting any medical or food assistance in the coming days?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'صحت',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Health center',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health services functioning prior to disaster': 'Health services functioning prior to disaster',
'Health services functioning since disaster': 'Health services functioning since disaster',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Help': 'مدد',
'Helps to monitor status of hospitals': 'Helps to monitor status of hospitals',
'Helps to report and search for Missing Persons': 'Helps to report and search for Missing Persons',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'History': 'History',
'Hit the back button on your browser to try again.': 'Hit the back button on your browser to try again.',
'Holiday Address': 'Holiday Address',
'Home': 'ہوم',
'Home Address': 'Home Address',
'Home Country': 'Home Country',
'Home Crime': 'Home Crime',
'Hospital': 'ہسپتال',
'Hospital Details': 'Hospital Details',
'Hospital Information': 'Hospital Information',
'Hospital Status Report': 'Hospital Status Report',
'Hospital information added': 'Hospital information added',
'Hospital information deleted': 'Hospital information deleted',
'Hospital information updated': 'Hospital information updated',
'Hospital status assessment.': 'Hospital status assessment.',
'Hospitals': 'ہسپتال',
'Hot Spot': 'Hot Spot',
'Hourly': 'Hourly',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How did boys 13-17yrs spend most of their time prior to the disaster?': 'How did boys 13-17yrs spend most of their time prior to the disaster?',
'How did boys <12yrs spend most of their time prior to the disaster?': 'How did boys <12yrs spend most of their time prior to the disaster?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'How did girls 13-17yrs spend most of their time prior to the disaster?',
'How did girls <12yrs spend most of their time prior to the disaster?': 'How did girls <12yrs spend most of their time prior to the disaster?',
'How do boys 13-17yrs spend most of their time now?': 'How do boys 13-17yrs spend most of their time now?',
'How do boys <12yrs spend most of their time now?': 'How do boys <12yrs spend most of their time now?',
'How do girls 13-17yrs spend most of their time now?': 'How do girls 13-17yrs spend most of their time now?',
'How do girls <12yrs spend most of their time now?': 'How do girls <12yrs spend most of their time now?',
'How does it work?': 'How does it work?',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': 'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.',
'How long does it take you to walk to the health service?': 'How long does it take you to walk to the health service?',
'How long will the food last?': 'How long will the food last?',
'How long will this water resource last?': 'How long will this water resource last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'How many Boys (0-17 yrs) are Missing due to the crisis',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'How many Men (18 yrs+) are Dead due to the crisis',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'How many Women (18 yrs+) are Injured due to the crisis',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many doctors in the health centers are still actively working?': 'How many doctors in the health centers are still actively working?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': 'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?',
'How many latrines are available in the village/IDP centre/Camp?': 'How many latrines are available in the village/IDP centre/Camp?',
'How many midwives in the health centers are still actively working?': 'How many midwives in the health centers are still actively working?',
'How many nurses in the health centers are still actively working?': 'How many nurses in the health centers are still actively working?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'How many of the primary school age boys (6-12) in the area are not attending school?',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'How many of the primary school age girls (6-12) in the area are not attending school?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': 'How many of the primary/secondary schools are now open and running a regular schedule of class?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'How many of the secondary school age boys (13-18) in the area are not attending school?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'How many of the secondary school age girls (13-18) in the area are not attending school?',
'How many primary school age boys (6-12) are in the affected area?': 'How many primary school age boys (6-12) are in the affected area?',
'How many primary school age girls (6-12) are in the affected area?': 'How many primary school age girls (6-12) are in the affected area?',
'How many primary/secondary schools were opening prior to the disaster?': 'How many primary/secondary schools were open prior to the disaster?',
'How many secondary school age boys (13-18) are in the affected area?': 'How many secondary school age boys (13-18) are in the affected area?',
'How many secondary school age girls (13-18) are in the affected area?': 'How many secondary school age girls (13-18) are in the affected area?',
'How many teachers have been affected by the disaster (affected = unable to work)?': 'How many teachers have been affected by the disaster (affected = unable to work)?',
'How many teachers worked in the schools prior to the disaster?': 'How many teachers worked in the schools prior to the disaster?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hygiene': 'صحت',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Hygiene kits, source',
'I am an Organization providing Relief on the Ground': 'I am an Organization providing Relief on the Ground',
'I am part of the Global Community of Volunteers': 'I am part of the Global Community of Volunteers',
'ID Label': 'ID Label',
'ID Label: ': 'ID Label: ',
'ID Tag': 'ID Tag',
'ID type': 'ID type',
'Ice Pressure': 'Ice Pressure',
'Iceberg': 'Iceberg',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'Ideally a full URL to the source file, otherwise just a note on where data came from.',
'Identification': 'شناخت',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification label of the Storage bin.': 'Identification label of the Storage bin.',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'If Unit = m, Base Unit = Km, then multiplicator is 0.001 since 1m = 0.001 km.',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.',
'If no marker defined then the system default marker is used': 'If no marker defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If this is a hospital, please check the hospital checkbox, then select the hospital. If there is no record for this hospital, you can create one and enter just the name if no other information is available.': 'If this is a hospital, please check the hospital checkbox, then select the hospital. If there is no record for this hospital, you can create one and enter just the name if no other information is available.',
'If this is a school that has a school code, please check the school checkbox, then enter the code. If the school code is not available, do not check the school checkbox. Instead include the school name in the shelter name or in the comments.': 'If this is a school that has a school code, please check the school checkbox, then enter the code. If the school code is not available, do not check the school checkbox. Instead include the school name in the shelter name or in the comments.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.",
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'If yes, which and how',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:",
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'If you need to add a new document then you can click here to attach one.',
'If you would like to help, then please': 'اگر آپ کو مدد کی ضرورت ہے ، تو براہ مہربانی گا',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Image Details',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image/Attachment': 'Image/Attachment',
'Image/Other Attachment': 'Image/Other Attachment',
'Imagery': 'Imagery',
'Images': 'تصاویر',
'Immediate reconstruction assistance, Rank': 'Immediate reconstruction assistance, Rank',
'Import': 'Import',
'Import & Export Data': 'Import & Export Data',
'Import Data': 'Import Data',
'Import Job': 'Import Job',
'Import Jobs': 'Import Jobs',
'Import and Export': 'Import and Export',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import if Master': 'Import if Master',
'Import job created': 'Import job created',
'Import multiple tables as CSV': 'Import multiple tables as CSV',
'Import/Export': 'Import/Export',
'Import/Master': 'Import/Master',
'Important': 'Important',
'Importantly where there are no aid services being provided': 'Importantly where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'In Inventories': 'In Inventories',
'In Progress': 'In Progress',
'In Transit': 'In Transit',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': 'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'حادثہ کی اطلاع',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident deleted': 'Incident deleted',
'Incident updated': 'Incident updated',
'Incidents': 'واقعات',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Industry close to village/camp': 'Industry close to village/camp',
'Infant (0-1)': 'Infant (0-1)',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Input Job': 'Input Job',
'Instance Type': 'Instance Type',
'Instant Porridge': 'Instant Porridge',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.",
'Insufficient': 'Insufficient',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Intake Items': 'Intake Items',
'Internal Features': 'Internal Features',
'Internal State': 'Internal State',
'International NGO': 'International NGO',
'International Organization': 'International Organization',
'International Staff': 'International Staff',
'Intervention': 'Intervention',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Invalid Query',
'Invalid email': 'Invalid email',
'Invalid login': 'Invalid login',
'Invalid request!': 'Invalid request!',
'Invalid ticket': 'Invalid ticket',
'Inventories with Item': 'Inventories with Item',
'Inventory': 'Inventory',
'Inventory Item Details': 'Inventory Item Details',
'Inventory Item added': 'Inventory Item added',
'Inventory Item deleted': 'Inventory Item deleted',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'فہرست اجزاء',
'Inventory Management': 'انوینٹری مینجمنٹ',
'Inventory Store': 'فہرست سٹور',
'Inventory Store Details': 'Inventory Store Details',
'Inventory Store added': 'Inventory Store added',
'Inventory Store deleted': 'Inventory Store deleted',
'Inventory Store updated': 'Inventory Store updated',
'Inventory Stores': 'فہرست سٹور',
'Inventory of Effects': 'Inventory of Effects',
'Inventory/Ledger': 'Inventory/Ledger',
'Is adequate food and water available for these institutions?': 'Is adequate food and water available for these institutions?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is there any industrial or agro-chemical production close to the affected area/village?': 'Is there any industrial or agro-chemical production close to the affected area/village?',
'Is this a Hospital?': 'Is this a Hospital?',
'Is this a School?': 'Is this a School?',
'Issuing Authority': 'Issuing Authority',
'It is built using the Template agreed by a group of NGOs working together as the': 'It is built using the Template agreed by a group of NGOs working together as the',
'Item': 'Item',
'Item Catalog Categories': 'Item Catalog Categories',
'Item Catalog Category': 'Item Catalog Category',
'Item Catalog Category Details': 'Item Catalog Category Details',
'Item Catalog Category added': 'Item Catalog Category added',
'Item Catalog Category deleted': 'Item Catalog Category deleted',
'Item Catalog Category updated': 'Item Catalog Category updated',
'Item Catalog Details': 'Item Catalog Details',
'Item Catalog added': 'Item Catalog added',
'Item Catalog deleted': 'Item Catalog deleted',
'Item Catalog updated': 'Item Catalog updated',
'Item Catalogs': 'Item Catalogs',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Item Category deleted',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Sub-Categories': 'Item Sub-Categories',
'Item Sub-Category': 'Item Sub-Category',
'Item Sub-Category Details': 'Item Sub-Category Details',
'Item Sub-Category added': 'Item Sub-Category added',
'Item Sub-Category deleted': 'Item Sub-Category deleted',
'Item Sub-Category updated': 'Item Sub-Category updated',
'Item added': 'Item added',
'Item already in Bundle!': 'Item already in Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item updated': 'Item updated',
'Items': 'آئٹم',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Title': 'Job Title',
'Jobs': 'Jobs',
'Just Once': 'Just Once',
'KPIs': 'KPIs',
'Key': 'Key',
'Key Details': 'Key Details',
'Key added': 'Key added',
'Key deleted': 'Key deleted',
'Key updated': 'Key updated',
'Keys': 'Keys',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Kit Details',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Kit deleted',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'LICENCE': 'LICENCE',
'LICENSE': 'LICENSE',
'LMS Administration': 'LMS Administration',
'Label': 'Label',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'زبان',
'Last Name': 'آخری نام',
'Last known location': 'Last known location',
'Last name': 'آخری نام',
'Last synchronization on': 'Last synchronization on',
'Last synchronization time': 'Last synchronization time',
'Latitude': 'عرض بلد',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude should be between': 'Latitude should be between',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer': 'Layer',
'Layer Details': 'Layer Details',
'Layer added': 'Layer added',
'Layer deleted': 'Layer deleted',
'Layer updated': 'Layer updated',
'Layers': 'Layers',
'Layers updated': 'Layers updated',
'Layout': 'Layout',
'Left-to-Right': 'Left-to-Right',
'Legend Format': 'Legend Format',
'Length': 'Length',
'Level': 'Level',
'Library support not available for OpenID': 'Library support not available for OpenID',
'Line': 'Line',
'Link Item & Shipment': 'Link Item & Shipment',
'Link an Item & Shipment': 'Link an Item & Shipment',
'Linked Records': 'Linked Records',
'Linked records': 'Linked records',
'List': 'فہرست',
'List ': 'فہرست ',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List Aid Requests': 'List Aid Requests',
'List All': 'List All',
'List All Memberships': 'List All Memberships',
'List Assessments': 'List Assessments',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Category<>Sub-Category<>Catalog Relation': 'List Category<>Sub-Category<>Catalog Relation',
'List Checklists': 'List Checklists',
'List Clusters': 'List Clusters',
'List Configs': 'List Configs',
'List Contacts': 'List Contacts',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'List Distributions',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List Groups': 'List Groups',
'List Groups/View Members': 'List Groups/View Members',
'List Hospitals': 'List Hospitals',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Inventory Items': 'List Inventory Items',
'List Inventory Stores': 'List Inventory Stores',
'List Item Catalog Categories': 'List Item Catalog Categories',
'List Item Catalogs': 'List Item Catalogs',
'List Item Categories': 'List Item Categories',
'List Item Sub-Categories': 'List Item Sub-Categories',
'List Items': 'List Items',
'List Keys': 'List Keys',
'List Kits': 'List Kits',
'List Layers': 'List Layers',
'List Locations': 'List Locations',
'List Log Entries': 'List Log Entries',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'List Memberships',
'List Messages': 'List Messages',
'List Metadata': 'List Metadata',
'List Missing Persons': 'List Missing Persons',
'List Offices': 'List Offices',
'List Organizations': 'List Organizations',
'List Peers': 'List Peers',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Reports': 'List Reports',
'List Request Items': 'List Request Items',
'List Requests': 'List Requests',
'List Resources': 'List Resources',
'List Responses': 'List Responses',
'List Rivers': 'List Rivers',
'List Roles': 'List Roles',
'List School Districts': 'List School Districts',
'List School Reports': 'List School Reports',
'List Sections': 'List Sections',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Transit Logs': 'List Shipment Transit Logs',
'List Shipment/Way Bills': 'List Shipment/Way Bills',
'List Shipment<>Item Relation': 'List Shipment<>Item Relation',
'List Sites': 'List Sites',
'List Skill Types': 'List Skill Types',
'List Skills': 'List Skills',
'List Solutions': 'List Solutions',
'List Sources': 'List Sources',
'List Staff': 'List Staff',
'List Staff Types': 'List Staff Types',
'List Storage Bin Type(s)': 'List Storage Bin Type(s)',
'List Storage Bins': 'List Storage Bins',
'List Storage Location': 'List Storage Location',
'List Subscriptions': 'List Subscriptions',
'List Survey Answers': 'List Survey Answers',
'List Survey Questions': 'List Survey Questions',
'List Survey Sections': 'List Survey Sections',
'List Survey Series': 'List Survey Series',
'List Survey Templates': 'List Survey Templates',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'List Tickets',
'List Tracks': 'List Tracks',
'List Units': 'List Units',
'List Users': 'List Users',
'List all': 'List all',
'List of Items': 'List of Items',
'List of Missing Persons': 'List of Missing Persons',
'List of Peers': 'List of Peers',
'List of Reports': 'List of Reports',
'List of Requests': 'List of Requests',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of addresses': 'List of addresses',
'List unidentified': 'List unidentified',
'List/Add': 'List/Add',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
'Live Help': 'Live Help',
'Livelihood': 'گزر اوقات',
'Load Details': 'Load Details',
'Load the details to help decide which is the best one to keep out of the 2.': 'Load the details to help decide which is the best one to keep out of the 2.',
'Loading Locations...': 'لوڈ ہو رہا ہے جگہ...',
'Local Name': 'Local Name',
'Local Names': 'Local Names',
'Location': 'جگہ',
'Location De-duplicated': 'Location De-duplicated',
'Location Details': 'Location Details',
'Location added': 'Location added',
'Location deleted': 'Location deleted',
'Location details': 'Location details',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'جگہ',
'Locations De-duplicator': 'Locations De-duplicator',
'Locations should be different!': 'Locations should be different!',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Log entry deleted',
'Log entry updated': 'Log entry updated',
'Logged in': 'لاگ ان میں',
'Logged out': 'لاگ آؤٹ',
'Login': 'لاگ ان',
'Logistics': 'Logistics',
'Logistics Management': 'رسد کا انتظام',
'Logistics Management System': 'Logistics Management System',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'لاگ آؤٹ',
'Long Text': 'Long Text',
'Longitude': 'طول بلد',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude should be between': 'Longitude should be between',
'Looking up Parents': 'Looking up Parents',
'Looting': 'Looting',
'Lost Password': 'حصول پاس ورڈ',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Main cash source': 'Main cash source',
'Main income sources before disaster': 'Main income sources before disaster',
'Major outward damage': 'Major outward damage',
'Make Pledge': 'Make Pledge',
'Make a Request': 'Make a Request',
'Make a Request for Aid': 'Make a Request for Aid',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Malnutrition present prior to disaster': 'Malnutrition present prior to disaster',
'Manage': 'Manage',
'Manage Category': 'Manage Category',
'Manage Item catalog': 'Manage Item catalog',
'Manage Kits': 'Manage Kits',
'Manage Relief Item Catalogue': 'Manage Relief Item Catalogue',
'Manage Sub-Category': 'Manage Sub-Category',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Warehouses/Sites': 'Manage Warehouses/Sites',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manage volunteers by capturing their skills, availability and allocation': 'Manage volunteers by capturing their skills, availability and allocation',
'Manager': 'Manager',
'Managing, Storing and Distributing Relief Items': 'Managing, Storing and Distributing Relief Items',
'Managing, Storing and Distributing Relief Items.': 'Managing, Storing and Distributing Relief Items.',
'Manual': 'Manual',
'Manual Synchronization': 'Manual Synchronization',
'Many': 'Many',
'Map': 'نقشہ',
'Map Height': 'Map Height',
'Map Service Catalogue': 'Map Service Catalogue',
'Map Settings': 'Map Settings',
'Map Width': 'Map Width',
'Map of Hospitals': 'Map of Hospitals',
'Mapping': 'Mapping',
'Marine Security': 'Marine Security',
'Marital Status': 'Marital Status',
'Marker': 'Marker',
'Marker Details': 'Marker Details',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Master Message Log to process incoming reports & requests',
'Matching Records': 'Matching Records',
'Matrix of Choices (Multiple Answers)': 'Matrix of Choices (Multiple Answers)',
'Matrix of Choices (Only one answer)': 'Matrix of Choices (Only one answer)',
'Matrix of Text Fields': 'Matrix of Text Fields',
'Max Persons per Dwelling': 'Max Persons per Dwelling',
'Maximum Weight': 'Maximum Weight',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.',
'Measure Area: Click the points around the polygon & end with a double-click': 'Measure Area: Click the points around the polygon & end with a double-click',
'Measure Length: Click the points along the path & end with a double-click': 'Measure Length: Click the points along the path & end with a double-click',
'Medical Staff': 'Medical Staff',
'Medical Supplies': 'Medical Supplies',
'Medical and public health': 'Medical and public health',
'Medicine': 'طب',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'نمائندگان',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Membership added',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Membership updated',
'Memberships': 'رکنیت',
'Message': 'پیغام',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Message added',
'Message deleted': 'Message deleted',
'Message sent to outbox': 'Message sent to outbox',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'پیغامات',
'Messaging': 'پیغام',
'Messaging settings updated': 'Messaging settings updated',
'Metadata': 'Metadata',
'Metadata Details': 'Metadata Details',
'Metadata added': 'Metadata added',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'Metadata can be supplied here to be applied to all uploaded photos, if desired.',
'Metadata deleted': 'Metadata deleted',
'Metadata updated': 'Metadata updated',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Micronutrient malnutrition prior to disaster': 'Micronutrient malnutrition prior to disaster',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Migrants or ethnic minorities',
'Military': 'Military',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes must be between 0 and 60': 'Minutes must be between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Minutes should be greater than 0 and less than 60': 'Minutes should be greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Missing Person',
'Missing Person Details': 'Missing Person Details',
'Missing Person Reports': 'Missing Person Reports',
'Missing Persons': 'لاپتہ افراد',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Missing Persons Report',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mobile': 'Mobile',
'Mobile Phone': 'موبائل فون',
'Mode': 'Mode',
'Modem Settings': 'Modem Settings',
'Modem settings updated': 'Modem settings updated',
'Moderator': 'Moderator',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module Administration': 'Module Administration',
'Module disabled!': 'Module disabled!',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.',
'Monday': 'Monday',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Move Feature: Drag feature to desired location': 'Move Feature: Drag feature to desired location',
'Movements (Filter In/Out/Lost)': 'Movements (Filter In/Out/Lost)',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Choice (Multiple Answers)': 'Multiple Choice (Multiple Answers)',
'Multiple Choice (Only One Answer)': 'Multiple Choice (Only One Answer)',
'Multiple Text Fields': 'Multiple Text Fields',
'Multiplicator': 'Multiplicator',
'Muslim': 'Muslim',
'My Tasks': 'My Tasks',
'N/A': 'N/A',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.",
'Name': 'Name',
'Name and/or ID': 'Name and/or ID',
'Name and/or ID Label': 'Name and/or ID Label',
'Name of School': 'Name of School',
'Name of Storage Bin Type.': 'Name of Storage Bin Type.',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National ID Card': 'National ID Card',
'National NGO': 'National NGO',
'National Staff': 'National Staff',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need to select 2 Locations': 'Need to select 2 Locations',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Need to specify a location to search for.',
'Need to specify a role!': 'Need to specify a role!',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Checklist': 'New Checklist',
'New Peer': 'New Peer',
'New Record': 'New Record',
'New Report': 'New Report',
'New Request': 'New Request',
'New Solution Choice': 'New Solution Choice',
'New Synchronization Peer': 'New Synchronization Peer',
'Next': 'Next',
'Next View': 'Next View',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Addresses currently registered': 'No Addresses currently registered',
'No Aid Requests have been made yet': 'No Aid Requests have been made yet',
'No Assessments currently registered': 'No Assessments currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'No Category<>Sub-Category<>Catalog Relation currently registered',
'No Checklist available': 'No Checklist available',
'No Clusters currently registered': 'No Clusters currently registered',
'No Configs currently defined': 'No Configs currently defined',
'No Details currently registered': 'No Details currently registered',
'No Distribution Items currently registered': 'No Distribution Items currently registered',
'No Distributions currently registered': 'No Distributions currently registered',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'No Flood Reports currently registered',
'No Groups currently defined': 'No Groups currently defined',
'No Groups currently registered': 'No Groups currently registered',
'No Hospitals currently registered': 'No Hospitals currently registered',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'No Image',
'No Images currently registered': 'No Images currently registered',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered': 'No Incidents currently registered',
'No Inventory Items currently registered': 'No Inventory Items currently registered',
'No Inventory Stores currently registered': 'No Inventory Stores currently registered',
'No Item Catalog Category currently registered': 'No Item Catalog Category currently registered',
'No Item Catalog currently registered': 'No Item Catalog currently registered',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Sub-Category currently registered': 'No Item Sub-Category currently registered',
'No Item currently registered': 'No Item currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'No Items currently requested',
'No Keys currently defined': 'No Keys currently defined',
'No Kits currently registered': 'No Kits currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Markers currently available': 'No Markers currently available',
'No Members currently registered': 'No Members currently registered',
'No Memberships currently defined': 'No Memberships currently defined',
'No Memberships currently registered': 'No Memberships currently registered',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Metadata currently defined': 'No Metadata currently defined',
'No Offices currently registered': 'No Offices currently registered',
'No Organizations currently registered': 'No Organizations currently registered',
'No Peers currently registered': 'No Peers currently registered',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'No Photos found',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'No Projections currently defined',
'No Projects currently registered': 'No Projects currently registered',
'No Records currently available': 'No Records currently available',
'No Records matching the query': 'No Records matching the query',
'No Reports currently registered': 'No Reports currently registered',
'No Responses currently registered': 'No Responses currently registered',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No School Districts currently registered': 'No School Districts currently registered',
'No School Reports currently registered': 'No School Reports currently registered',
'No Sections currently registered': 'No Sections currently registered',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'No Shelters currently registered',
'No Shipment Transit Logs currently registered': 'No Shipment Transit Logs currently registered',
'No Shipment/Way Bills currently registered': 'No Shipment/Way Bills currently registered',
'No Shipment<>Item Relation currently registered': 'No Shipment<>Item Relation currently registered',
'No Sites currently registered': 'No Sites currently registered',
'No Skill Types currently set': 'No Skill Types currently set',
'No Solutions currently defined': 'No Solutions currently defined',
'No Sources currently registered': 'No Sources currently registered',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Storage Bin Type currently registered': 'No Storage Bin Type currently registered',
'No Storage Bins currently registered': 'No Storage Bins currently registered',
'No Storage Locations currently registered': 'No Storage Locations currently registered',
'No Subscription available': 'No Subscription available',
'No Survey Answers currently registered': 'No Survey Answers currently registered',
'No Survey Questions currently registered': 'No Survey Questions currently registered',
'No Survey Sections currently registered': 'No Survey Sections currently registered',
'No Survey Series currently registered': 'No Survey Series currently registered',
'No Survey Template currently registered': 'No Survey Template currently registered',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'No Tickets currently registered',
'No Tracks currently available': 'No Tracks currently available',
'No Units currently registered': 'No Units currently registered',
'No Users currently registered': 'No Users currently registered',
'No access at all': 'No access at all',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No contact information available': 'No contact information available',
'No contacts currently registered': 'No contacts currently registered',
'No data in this table - cannot create PDF!': 'No data in this table - cannot create PDF!',
'No databases in this application': 'No databases in this application',
'No import jobs': 'No import jobs',
'No linked records': 'No linked records',
'No locations registered at this level': 'No locations registered at this level',
'No matching records found.': 'No matching records found.',
'No messages in the system': 'No messages in the system',
'No of Families Settled in the Schools': 'No of Families Settled in the Schools',
'No of Families to whom Food Items are Available': 'No of Families to whom Food Items are Available',
'No of Families to whom Hygiene is Available': 'No of Families to whom Hygiene is Available',
'No of Families to whom Non-Food Items are Available': 'No of Families to whom Non-Food Items are Available',
'No of Female Students (Primary To Higher Secondary) in the Total Affectees': 'No of Female Students (Primary To Higher Secondary) in the Total Affectees',
'No of Female Teachers & Other Govt Servants in the Total Affectees': 'No of Female Teachers & Other Govt Servants in the Total Affectees',
'No of Male Students (Primary To Higher Secondary) in the Total Affectees': 'No of Male Students (Primary To Higher Secondary) in the Total Affectees',
'No of Male Teachers & Other Govt Servants in the Total Affectees': 'No of Male Teachers & Other Govt Servants in the Total Affectees',
'No of Rooms Occupied By Flood Affectees': 'No of Rooms Occupied By Flood Affectees',
'No peers currently registered': 'No peers currently registered',
'No pending registrations found': 'No pending registrations found',
'No pending registrations matching the query': 'No pending registrations matching the query',
'No person record found for current user.': 'No person record found for current user.',
'No positions currently registered': 'No positions currently registered',
'No problem group defined yet': 'No problem group defined yet',
'No records matching the query': 'No records matching the query',
'No records to delete': 'No records to delete',
'No recovery reports available': 'No recovery reports available',
'No report available.': 'No report available.',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No requests currently registered': 'No requests currently registered',
'No requests found': 'No requests found',
'No resources currently registered': 'No resources currently registered',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No synchronization': 'No synchronization',
'No tasks currently registered': 'No tasks currently registered',
'No template found!': 'No template found!',
'No units currently registered': 'No units currently registered',
'No volunteer information registered': 'No volunteer information registered',
'Non-medical Staff': 'Non-medical Staff',
'None': 'کوئی بھی نہیں',
'None (no such record)': 'None (no such record)',
'Noodles': 'Noodles',
'Normal': 'Normal',
'Normal food sources disrupted': 'Normal food sources disrupted',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Not Authorised!',
'Not Possible': 'Not Possible',
'Not Set': 'Not Set',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Not installed or incorrectly configured.',
'Notice to Airmen': 'Notice to Airmen',
'Number': 'Number',
'Number of Columns': 'Number of Columns',
'Number of Patients': 'Number of Patients',
'Number of Rows': 'Number of Rows',
'Number of Vehicles': 'Number of Vehicles',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Number of alternative places for studying',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of doctors actively working': 'Number of doctors actively working',
'Number of houses damaged, but usable': 'Number of houses damaged, but usable',
'Number of houses destroyed/uninhabitable': 'Number of houses destroyed/uninhabitable',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of latrines': 'Number of latrines',
'Number of midwives actively working': 'Number of midwives actively working',
'Number of newly admitted patients during the past 24 hours.': 'Number of newly admitted patients during the past 24 hours.',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of nurses actively working': 'Number of nurses actively working',
'Number of private schools': 'Number of private schools',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of schools damaged but usable': 'Number of schools damaged but usable',
'Number of schools destroyed/uninhabitable': 'Number of schools destroyed/uninhabitable',
'Number of schools open before disaster': 'Number of schools open before disaster',
'Number of schools open now': 'Number of schools open now',
'Number of teachers affected by disaster': 'Number of teachers affected by disaster',
'Number of teachers before disaster': 'Number of teachers before disaster',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Number/Percentage of affected population that is Male & Aged 0-5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Number/Percentage of affected population that is Male & Aged 26-60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numbers Only': 'Numbers Only',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'غذائیت',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Observer': 'Observer',
'Obstetrics/Gynecology': 'Obstetrics/Gynecology',
'Office': 'Office',
'Office Address': 'Office Address',
'Office Details': 'Office Details',
'Office added': 'Office added',
'Office deleted': 'Office deleted',
'Office updated': 'Office updated',
'Offices': 'دفتر',
'Offline Sync': 'Offline Sync',
'Offline Sync (from USB/File Backup)': 'Offline Sync (from USB/File Backup)',
'Old': 'پرانا',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older people with chronical illnesses': 'Older people with chronical illnesses',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'On by default?',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! something went wrong on our side.',
'Open': 'اوپن',
'Open Map': 'اوپن کا نقشہ',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Operating Rooms': 'Operating Rooms',
'Options': 'اختیارات',
'Organization': 'Organization',
'Organization Details': 'Organization Details',
'Organization Registry': 'Organization Registry',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization updated': 'Organization updated',
'Organizations': 'تنظیم',
'Origin': 'Origin',
'Origin of the separated children': 'Origin of the separated children',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Other Evidence',
'Other Faucet/Piped Water': 'Other Faucet/Piped Water',
'Other Isolation': 'Other Isolation',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Other activities of boys 13-17yrs',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Other activities of boys <12yrs before disaster',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Other alternative places for study',
'Other assistance needed': 'Other assistance needed',
'Other assistance, Rank': 'Other assistance, Rank',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Other factors affecting school attendance',
'Other major expenses': 'Other major expenses',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overland Flow Flood': 'Overland Flow Flood',
'Overlays': 'Overlays',
'Overview Map': 'Overview Map',
'PDAM': 'PDAM',
'PF Number': 'PF Number',
'PL Women': 'PL Women',
'Pan Map: keep the left mouse button pressed and drag the map': 'Pan Map: keep the left mouse button pressed and drag the map',
'Parameters': 'Parameters',
'Parent': 'Parent',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Participant': 'Participant',
'Pashto': 'Pashto',
'Passport': 'Passport',
'Password': 'پاس ورڈ',
"Password fields don't match": "Password fields don't match",
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.',
'Pathology': 'Pathology',
'Patients': 'Patients',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Pediatrics',
'Peer': 'Peer',
'Peer Details': 'Peer Details',
'Peer Registration': 'Peer Registration',
'Peer Registration Details': 'Peer Registration Details',
'Peer Registration Request': 'Peer Registration Request',
'Peer Type': 'Peer Type',
'Peer UID': 'Peer UID',
'Peer added': 'Peer added',
'Peer deleted': 'Peer deleted',
'Peer not allowed to push': 'Peer not allowed to push',
'Peer registration request added': 'Peer registration request added',
'Peer registration request deleted': 'Peer registration request deleted',
'Peer registration request updated': 'Peer registration request updated',
'Peer updated': 'Peer updated',
'Peers': 'Peers',
'Pending Requests': 'Pending Requests',
'People': 'People',
'People Trapped': 'People Trapped',
'People with chronical illnesses': 'People with chronical illnesses',
'Person': 'Person',
'Person Data': 'Person Data',
'Person Details': 'Person Details',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person found': 'Person found',
'Person interviewed': 'Person interviewed',
'Person missing': 'Person missing',
'Person reporting': 'Person reporting',
'Person who is reporting about the presence.': 'Person who is reporting about the presence.',
'Person who observed the presence (if different from reporter).': 'Person who observed the presence (if different from reporter).',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Personal Effects Details',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'افراد',
'Persons with disability (mental)': 'Persons with disability (mental)',
'Persons with disability (physical)': 'Persons with disability (physical)',
'Phone': 'فون',
'Phone 1': 'فون 1',
'Phone 2': 'فون 2',
"Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organization's relief efforts.",
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange': 'Phone/Exchange',
'Photo': 'تصویر',
'Photo Details': 'Photo Details',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'تصاویر',
'Physical Description': 'Physical Description',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place for solid waste disposal': 'Place for solid waste disposal',
'Place of Recovery': 'Place of Recovery',
'Places the children have been sent to': 'Places the children have been sent to',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please correct all errors.': 'Please correct all errors.',
'Please enter a First Name': 'Please enter a First Name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please report here where you are:': 'Please report here where you are:',
'Please select another level': 'Please select another level',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field to record any additional information, including a history of the record if it is updated.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge': 'Pledge',
'Pledge Aid': 'Pledge Aid',
'Pledge Aid to match these Requests': 'Pledge Aid to match these Requests',
'Pledge Status': 'Pledge Status',
'Pledge Support': 'Pledge Support',
'Pledged': 'وعدہ',
'Pledges': 'وعدوں',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'پولیس',
'Pollution and other environmental': 'Pollution and other environmental',
'Polygon': 'Polygon',
'Population': 'Population',
'Porridge': 'Porridge',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position type': 'Position type',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Poultry restocking, Rank',
'Pounds': 'Pounds',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.",
'Previous': 'Previous',
'Previous View': 'Previous View',
'Primary Name': 'Primary Name',
'Priority': 'ترجیح',
'Priority Level': 'Priority Level',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Problem Administration',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Problem updated',
'Problems': 'Problems',
'Procedure': 'Procedure',
'Procurements': 'Procurements',
'Product Description': 'Product Description',
'Product Name': 'Product Name',
'Profile': 'Profile',
'Project': 'Project',
'Project Activities': 'Project Activities',
'Project Details': 'Project Details',
'Project Management': 'Project Management',
'Project Status': 'Project Status',
'Project added': 'Project added',
'Project deleted': 'Project deleted',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Projection updated',
'Projections': 'Projections',
'Projects': 'منصوبہ',
'Protected resource': 'Protected resource',
'Protection': 'تحفظ',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Province': 'صوبہ',
'Proxy-server': 'Proxy-server',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Psychiatrics/Pediatric',
'Public Event': 'Public Event',
'Public and private transportation': 'Public and private transportation',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Punjabi': 'Punjabi',
'Push tickets to external system': 'Push tickets to external system',
'Put a choice in the box': 'Put a choice in the box',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Pyroclastic Surge',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'مقدار',
'Quarantine': 'Quarantine',
'Query': 'Query',
'Query Feature': 'Query Feature',
'Queryable?': 'Queryable?',
'Race': 'Race',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Railway Accident',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessments': 'ریپڈ تجزیے',
'Rapid Close Lead': 'Rapid Close Lead',
'Rating Scale': 'Rating Scale',
'Raw Database access': 'Raw Database access',
'Real World Arbitrary Units': 'Real World Arbitrary Units',
'Receive': 'Receive',
'Received': 'Received',
'Recipient': 'Recipient',
'Recipients': 'Recipients',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Details': 'Record Details',
'Record ID': 'Record ID',
'Record added': 'Record added',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Records': 'Records',
'Recovery': 'Recovery',
'Recovery Reports': 'Recovery Reports',
'Recovery Request': 'Recovery Request',
'Recovery Request added': 'Recovery Request added',
'Recovery Request deleted': 'Recovery Request deleted',
'Recovery Request updated': 'Recovery Request updated',
'Recovery Requests': 'Recovery Requests',
'Recovery report added': 'Recovery report added',
'Recovery report deleted': 'Recovery report deleted',
'Recovery report updated': 'Recovery report updated',
'Recurring': 'Recurring',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Recurring costs',
'Reference Document': 'Reference Document',
'Regional': 'Regional',
'Register': 'رجسٹریشن کروائیے',
'Register Person': 'Register Person',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Registered People': 'Registered People',
'Registered users can': 'رجسٹرڈ صارفین کر سکتے ہیں',
'Registering ad-hoc volunteers willing to contribute': 'Registering ad-hoc volunteers willing to contribute',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Registration added',
'Registration entry deleted': 'Registration entry deleted',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Registration updated': 'Registration updated',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reliable access to sanitation/hygiene items': 'Reliable access to sanitation/hygiene items',
'Relief': 'Relief',
'Relief Item': 'Relief Item',
'Relief Item Catalog': 'Relief Item Catalog',
'Relief Items': 'امدادی سامان',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remove': 'خارج کر دیں',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Remove Feature: Select the feature you wish to remove & press the delete key',
'Repeat your password': 'Repeat your password',
'Replace': 'تبدیل',
'Replace if Master': 'Replace if Master',
'Replace if Newer': 'Replace if Newer',
'Replace/Master': 'Replace/Master',
'Replace/Newer': 'Replace/Newer',
'Report': 'Report',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report Type': 'Report Type',
'Report Types Include': 'Report Types Include',
'Report a Problem with the Software': 'Report a Problem with the Software',
'Report added': 'Report added',
'Report deleted': 'Report deleted',
'Report my location': 'Report my location',
'Report that person missing': 'Report that person missing',
'Report the contributing factors for the current EMS status.': 'Report the contributing factors for the current EMS status.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report the person as found': 'Report the person as found',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reported By': 'رپورٹ',
'Reporter': 'رپورٹر',
'Reporter Name': 'خبررساں کا نام',
'Reporter: ': 'رپورٹر: ',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'رپورٹس',
'Request': 'Request',
'Request Detail': 'Request Detail',
'Request Details': 'Request Details',
'Request Item Details': 'Request Item Details',
'Request Item added': 'Request Item added',
'Request Item deleted': 'Request Item deleted',
'Request Item updated': 'Request Item updated',
'Request Items': 'درخواست آئٹم',
'Request Type': 'درخواست کی قسم',
'Request for Role Upgrade': 'Request for Role Upgrade',
'Request, Response & Session': 'Request, Response & Session',
'Requested': 'درخواست',
'Requested by': 'Requested by',
'Requested on': 'Requested on',
'Requestor': 'درخواست دھندہ',
'Requests': 'درخواستیں',
'Requests for Item': 'Requests for Item',
'Requires login': 'Requires login',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Reset Password',
'Reset form': 'Reset form',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size',
'Resource': 'Resource',
'Resource Details': 'Resource Details',
'Resource added': 'Resource added',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Resources',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Response Details': 'Response Details',
'Response added': 'Response added',
'Response deleted': 'Response deleted',
'Response updated': 'Response updated',
'Responses': 'Responses',
'Restricted Access': 'Restricted Access',
'Restrictions': 'Restrictions',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Rice': 'چاول',
'Right-hand headline': 'Right-hand headline',
'Right-to-Left': 'Right-to-Left',
'Riot': 'Riot',
'River': 'ندی',
'River Details': 'River Details',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Road Accident',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Road Usage Condition',
'Role': 'Role',
'Role Details': 'Role Details',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role-based': 'Role-based',
'Roles': 'Roles',
'Roof tile': 'Roof tile',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location',
'Row Choices (One Per Line)': 'Row Choices (One Per Line)',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Run Functional Tests': 'Run Functional Tests',
'Run Interval': 'Run Interval',
'Running Cost': 'Running Cost',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety of children and women affected by disaster': 'Safety of children and women affected by disaster',
'Sahana Administrator': 'Sahana Administrator',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana Blue',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Other',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Website': 'Sahana Eden Website',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': ' ایڈن درخواست ہے کہ مطابقت اور مصیبت میں انتظام کام کرنے کی تنظیموں کے لئے تعاون کے حل کے لئے فراہم کی ایک خاندان ہے Sahana',
'Sahana FOSS Disaster Management System': 'Sahana FOSS Disaster Management System',
'Sahana Green': 'Sahana Green',
'Sahana Login Approval Pending': 'Sahana Login Approval Pending',
'Sahana Pakistan Floods Response': 'Sahana Pakistan Floods Response',
'Sahana Steel': 'Sahana Steel',
'Sahana access granted': 'Sahana access granted',
'Salted Fish': 'Salted Fish',
'Salvage material usable from destroyed houses': 'Salvage material usable from destroyed houses',
'Salvage material usable from destroyed schools': 'Salvage material usable from destroyed schools',
'Satellite': 'Satellite',
'Satellite Office': 'Satellite Office',
'Saturday': 'Saturday',
'Save': 'محفوظ کریں',
'Save any Changes in the one you wish to keep': 'Save any Changes in the one you wish to keep',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Save: Default Lat, Lon & Zoom for the Viewport',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Scale of Results',
'Scanned File': 'Scanned File',
'Schedule': 'Schedule',
'School': 'سکول',
'School Closure': 'School Closure',
'School Code': 'School Code',
'School District': 'School District',
'School District Details': 'School District Details',
'School District added': 'School District added',
'School District deleted': 'School District deleted',
'School District updated': 'School District updated',
'School Districts': 'School Districts',
'School Information': 'School Information',
'School Lockdown': 'School Lockdown',
'School Report Details': 'School Report Details',
'School Report added': 'School Report added',
'School Report deleted': 'School Report deleted',
'School Report updated': 'School Report updated',
'School Reports': 'School Reports',
'School Teacher': 'School Teacher',
'School assistance received/expected': 'School assistance received/expected',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Schools': 'سکول',
'Search': 'تلاش',
'Search ': 'تلاش ',
'Search & List Bin Types': 'Search & List Bin Types',
'Search & List Bins': 'Search & List Bins',
'Search & List Catalog': 'Search & List Catalog',
'Search & List Category': 'Search & List Category',
'Search & List Items': 'Search & List Items',
'Search & List Locations': 'Search & List Locations',
'Search & List Site': 'Search & List Site',
'Search & List Sub-Category': 'Search & List Sub-Category',
'Search & List Unit': 'Search & List Unit',
'Search Activities': 'Search Activities',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Aid Requests': 'Search Aid Requests',
'Search Assessments': 'Search Assessments',
'Search Budgets': 'Search Budgets',
'Search Bundles': 'Search Bundles',
'Search Category<>Sub-Category<>Catalog Relation': 'Search Category<>Sub-Category<>Catalog Relation',
'Search Checklists': 'Search Checklists',
'Search Clusters': 'Search Clusters',
'Search Configs': 'Search Configs',
'Search Contact Information': 'Search Contact Information',
'Search Contacts': 'Search Contacts',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Search Distributions',
'Search Documents': 'Search Documents',
'Search Donors': 'Search Donors',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Search Feature Layers',
'Search Flood Reports': 'Search Flood Reports',
'Search Geonames': 'Search Geonames',
'Search Groups': 'Search Groups',
'Search Hospitals': 'Search Hospitals',
'Search Identity': 'Search Identity',
'Search Images': 'Search Images',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory Stores': 'Search Inventory Stores',
'Search Item Catalog Category(s)': 'Search Item Catalog Category(s)',
'Search Item Catalog(s)': 'Search Item Catalog(s)',
'Search Item Categories': 'Search Item Categories',
'Search Item Sub-Category(s)': 'Search Item Sub-Category(s)',
'Search Items': 'Search Items',
'Search Keys': 'Search Keys',
'Search Kits': 'Search Kits',
'Search Layers': 'Search Layers',
'Search Locations': 'Search Locations',
'Search Log Entry': 'Search Log Entry',
'Search Markers': 'Search Markers',
'Search Member': 'Search Member',
'Search Membership': 'Search Membership',
'Search Memberships': 'Search Memberships',
'Search Metadata': 'Search Metadata',
'Search Offices': 'Search Offices',
'Search Organizations': 'Search Organizations',
'Search Peer': 'Search Peer',
'Search Peers': 'Search Peers',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Search Persons',
'Search Photos': 'Search Photos',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Search Projects',
'Search Records': 'Search Records',
'Search Recovery Reports': 'Search Recovery Reports',
'Search Registations': 'Search Registations',
'Search Registration Request': 'Search Registration Request',
'Search Report': 'Search Report',
'Search Reports': 'Search Reports',
'Search Request': 'Search Request',
'Search Request Items': 'Search Request Items',
'Search Resources': 'Search Resources',
'Search Responses': 'Search Responses',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Search Roles',
'Search School Districts': 'Search School Districts',
'Search School Reports': 'Search School Reports',
'Search Sections': 'Search Sections',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Search Shelter Types',
'Search Shelters': 'Search Shelters',
'Search Shipment Transit Logs': 'Search Shipment Transit Logs',
'Search Shipment/Way Bills': 'Search Shipment/Way Bills',
'Search Shipment<>Item Relation': 'Search Shipment<>Item Relation',
'Search Site(s)': 'Search Site(s)',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Sources': 'Search Sources',
'Search Staff': 'Search Staff',
'Search Staff Types': 'Search Staff Types',
'Search Storage Bin Type(s)': 'Search Storage Bin Type(s)',
'Search Storage Bin(s)': 'Search Storage Bin(s)',
'Search Storage Location(s)': 'Search Storage Location(s)',
'Search Subscriptions': 'Search Subscriptions',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Search Teams',
'Search Themes': 'Search Themes',
'Search Tickets': 'Search Tickets',
'Search Tracks': 'Search Tracks',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Volunteer Registrations': 'Search Volunteer Registrations',
'Search and Edit Group': 'Search and Edit Group',
'Search and Edit Individual': 'Search and Edit Individual',
'Search by ID Tag': 'Search by ID Tag',
'Search for Items': 'Search for Items',
'Search for a Hospital': 'Search for a Hospital',
'Search for a Location': 'Search for a Location',
'Search for a Person': 'Search for a Person',
'Search for a Project': 'Search for a Project',
'Search for a Request': 'Search for a Request',
"Search here for a person's record in order to:": "Search here for a person's record in order to:",
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Seconds must be a number between 0 and 60': 'Seconds must be a number between 0 and 60',
'Seconds must be between 0 and 60': 'Seconds must be between 0 and 60',
'Section Details': 'Section Details',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sector(s): ': 'Sector(s): ',
'Security': 'Security',
'Security Policy': 'Security Policy',
'Security Status': 'Security Status',
'Seen': 'Seen',
'Select 2 potential locations from the dropdowns.': 'Select 2 potential locations from the dropdowns.',
'Select Photos': 'Select Photos',
'Select a location': 'منتخب جگہ',
"Select a person in charge for status 'assigned'": "Select a person in charge for status 'assigned'",
'Select a question from the list': 'Select a question from the list',
'Select all that apply': 'Select all that apply',
'Select an Organization to see a list of offices': 'Select an Organization to see a list of offices',
'Select an existing Location': 'Select an existing Location',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the person associated with this scenario.': 'Select the person associated with this scenario.',
'Select to see a list of subdivisions.': 'Select to see a list of subdivisions.',
'Selects whether to use the gateway or the Modem for sending out SMS': 'Selects whether to use the gateway or the Modem for sending out SMS',
'Self Registration': 'Self Registration',
'Self-registration': 'Self-registration',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Mail': 'Send Mail',
'Send message': 'Send message',
'Send new message': 'Send new message',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sensitivity': 'Sensitivity',
'Sent': 'Sent',
'Separate latrines for women and men': 'Separate latrines for women and men',
'Seraiki': 'Seraiki',
'Series': 'Series',
'Server': 'Server',
'Service': 'Service',
'Service Catalogue': 'Service Catalogue',
'Service or Facility': 'Service or Facility',
'Service profile added': 'Service profile added',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Services',
'Services Available': 'Services Available',
'Setting Details': 'Setting Details',
'Setting added': 'Setting added',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Settings',
'Settings updated': 'Settings updated',
'Share a common Marker (unless over-ridden at the Feature level)': 'Share a common Marker (unless over-ridden at the Feature level)',
'Shelter': 'Shelter',
'Shelter & Essential NFIs': 'NFIs شیلٹر اور ضروری',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Shelter Registry',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Shelter Service Details',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Shelter Services',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Shelter added',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter(s)': 'Shelter(s)',
'Shelter/NFI assistance received/expected': 'Shelter/NFI assistance received/expected',
'Shelters': 'پناہ گاہیں',
'Shipment Transit Log Details': 'Shipment Transit Log Details',
'Shipment Transit Log added': 'Shipment Transit Log added',
'Shipment Transit Log deleted': 'Shipment Transit Log deleted',
'Shipment Transit Log updated': 'Shipment Transit Log updated',
'Shipment Transit Logs': 'Shipment Transit Logs',
'Shipment/Way Bill added': 'Shipment/Way Bill added',
'Shipment/Way Bills': 'Shipment/Way Bills',
'Shipment/Way Bills Details': 'Shipment/Way Bills Details',
'Shipment/Way Bills deleted': 'Shipment/Way Bills deleted',
'Shipment/Way Bills updated': 'Shipment/Way Bills updated',
'Shipment<>Item Relation added': 'Shipment<>Item Relation added',
'Shipment<>Item Relation deleted': 'Shipment<>Item Relation deleted',
'Shipment<>Item Relation updated': 'Shipment<>Item Relation updated',
'Shipment<>Item Relations': 'Shipment<>Item Relations',
'Shipment<>Item Relations Details': 'Shipment<>Item Relations Details',
'Shipments': 'Shipments',
'Shooting': 'Shooting',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show on map': 'Show on map',
'Sindhi': 'Sindhi',
'Site': 'Site',
'Site Address': 'Site Address',
'Site Administration': 'Site Administration',
'Site Description': 'Site Description',
'Site Details': 'Site Details',
'Site ID': 'Site ID',
'Site Location Description': 'Site Location Description',
'Site Location Name': 'Site Location Name',
'Site Manager': 'Site Manager',
'Site Name': 'Site Name',
'Site added': 'Site added',
'Site deleted': 'Site deleted',
'Site updated': 'Site updated',
'Site/Warehouse': 'Site/Warehouse',
'Sites': 'Sites',
'Situation Awareness & Geospatial Analysis': 'Situation Awareness & Geospatial Analysis',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Details': 'Skill Details',
'Skill Type Details': 'Skill Type Details',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Skill added',
'Skill deleted': 'Skill deleted',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skype ID': 'Skype ID',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Solution deleted',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Sorry, I could not understand your request',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Sorry, that page is forbidden for some reason.',
'Sorry, that service is temporary unavailable.': 'Sorry, that service is temporary unavailable.',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": "Sorry, we couldn't find that page.",
'Source': 'Source',
'Source Details': 'Source Details',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Source Type': 'Source Type',
'Source added': 'Source added',
'Source deleted': 'Source deleted',
'Source of Information': 'Source of Information',
'Source updated': 'Source updated',
'Sources': 'Sources',
'Space Debris': 'Space Debris',
'Spanish': 'Spanish',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special needs': 'Special needs',
'Specialized Hospital': 'Specialized Hospital',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the minimum sustainability in weeks or days.': 'Specify the minimum sustainability in weeks or days.',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'سٹاف',
'Staff 2': 'سٹاف 2',
'Staff Details': 'Staff Details',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff added': 'Staff added',
'Staff deleted': 'Staff deleted',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff updated': 'Staff updated',
'Staffing': 'Staffing',
'Start date': 'Start date',
'Start of Period': 'Start of Period',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Status of operations of the emergency department of this hospital.',
'Status of security procedures/access restrictions in the hospital.': 'Status of security procedures/access restrictions in the hospital.',
'Status of the operating rooms of this hospital.': 'Status of the operating rooms of this hospital.',
'Storage Bin': 'Storage Bin',
'Storage Bin Details': 'Storage Bin Details',
'Storage Bin Number': 'Storage Bin Number',
'Storage Bin Type': 'Storage Bin Type',
'Storage Bin Type Details': 'Storage Bin Type Details',
'Storage Bin Type added': 'Storage Bin Type added',
'Storage Bin Type deleted': 'Storage Bin Type deleted',
'Storage Bin Type updated': 'Storage Bin Type updated',
'Storage Bin Types': 'Storage Bin Types',
'Storage Bin added': 'Storage Bin added',
'Storage Bin deleted': 'Storage Bin deleted',
'Storage Bin updated': 'Storage Bin updated',
'Storage Bins': 'Storage Bins',
'Storage Location': 'Storage Location',
'Storage Location Details': 'Storage Location Details',
'Storage Location ID': 'Storage Location ID',
'Storage Location Name': 'Storage Location Name',
'Storage Location added': 'Storage Location added',
'Storage Location deleted': 'Storage Location deleted',
'Storage Location updated': 'Storage Location updated',
'Storage Locations': 'Storage Locations',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Street': 'Street',
'Street (continued)': 'Street (continued)',
'Street Address': 'سٹریٹ کا پتہ',
'Strong Wind': 'Strong Wind',
'Sub Category': 'Sub Category',
'Sub-type': 'Sub-type',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submission successful - please wait...': 'Submission successful - please wait...',
'Submit': 'عرض',
'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': 'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Subscriptions',
'Subsistence Cost': 'Subsistence Cost',
'Sufficient care/assistance for chronically ill': 'Sufficient care/assistance for chronically ill',
'Suggest not changing this field unless you know what you are doing.': 'Suggest not changing this field unless you know what you are doing.',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Support Request': 'Support Request',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.',
'Sure you want to delete this object?': 'Sure you want to delete this object?',
'Surgery': 'سرجری',
'Survey Answer': 'Survey Answer',
'Survey Answer Details': 'Survey Answer Details',
'Survey Answer added': 'Survey Answer added',
'Survey Answer deleted': 'Survey Answer deleted',
'Survey Answer updated': 'Survey Answer updated',
'Survey Module': 'Survey Module',
'Survey Name': 'Survey Name',
'Survey Question': 'Survey Question',
'Survey Question Details': 'Survey Question Details',
'Survey Question Display Name': 'Survey Question Display Name',
'Survey Question added': 'Survey Question added',
'Survey Question deleted': 'Survey Question deleted',
'Survey Question updated': 'Survey Question updated',
'Survey Section': 'Survey Section',
'Survey Section Details': 'Survey Section Details',
'Survey Section Display Name': 'Survey Section Display Name',
'Survey Section added': 'Survey Section added',
'Survey Section deleted': 'Survey Section deleted',
'Survey Section updated': 'Survey Section updated',
'Survey Series': 'Survey Series',
'Survey Series Details': 'Survey Series Details',
'Survey Series Name': 'Survey Series Name',
'Survey Series added': 'Survey Series added',
'Survey Series deleted': 'Survey Series deleted',
'Survey Series updated': 'Survey Series updated',
'Survey Template': 'Survey Template',
'Survey Template Details': 'Survey Template Details',
'Survey Template added': 'Survey Template added',
'Survey Template deleted': 'Survey Template deleted',
'Survey Template updated': 'Survey Template updated',
'Survey Templates': 'Survey Templates',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': 'Switch this on to use individual CSS/Javascript files for diagnostics during development.',
'Symbology': 'Symbology',
'Sync Conflicts': 'Sync Conflicts',
'Sync History': 'Sync History',
'Sync Now': 'Sync Now',
'Sync Partners': 'Sync Partners',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.',
'Sync Pools': 'Sync Pools',
'Sync Schedule': 'Sync Schedule',
'Sync Settings': 'Sync Settings',
'Sync process already started on ': 'Sync process already started on ',
'Synchronisation': 'Synchronisation',
'Synchronisation History': 'Synchronisation History',
'Synchronization': 'Synchronization',
'Synchronization Peers': 'Synchronization Peers',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden',
'Synchronization not configured': 'Synchronization not configured',
'Synchronization not configured.': 'Synchronization not configured.',
'Synchronization settings updated': 'Synchronization settings updated',
'Syncronisation History': 'Syncronisation History',
'Syncronisation Schedules': 'Syncronisation Schedules',
'System allows the General Public to Report Incidents & have these Tracked.': 'System allows the General Public to Report Incidents & have these Tracked.',
'System allows the tracking & discovery of Items stored in Locations.': 'System allows the tracking & discovery of Items stored in Locations.',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.',
'Table name': 'Table name',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Task added',
'Task deleted': 'Task deleted',
'Task status': 'Task status',
'Task updated': 'Task updated',
'Tasks': 'کام',
'Team': 'Team',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Head': 'Team Head',
'Team Id': 'Team Id',
'Team Leader': 'Team Leader',
'Team Member added': 'Team Member added',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Team Type',
'Team added': 'Team added',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Team:': 'Team:',
'Teams': 'ٹیم',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telephone': 'Telephone',
'Telephony': 'Telephony',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Terrorism': 'Terrorism',
'Test Results': 'Test Results',
'Text': 'Text',
'Text Colour for Text blocks': 'Text Colour for Text blocks',
'Text Direction': 'Text Direction',
'Text before each Text Field (One per line)': 'Text before each Text Field (One per line)',
'Text in Message': 'Text in Message',
'Text in Message: ': 'Text in Message: ',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.",
'The Area which this Site is located within.': 'The Area which this Site is located within.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The District for this Report.': 'The District for this Report.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Group whose members can edit data in this record.': 'The Group whose members can edit data in this record.',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).',
'The Office this record is associated with.': 'The Office this record is associated with.',
'The Organization this record is associated with.': 'The Organization this record is associated with.',
'The Organization which is funding this Activity.': 'The Organization which is funding this Activity.',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'The Rapid Assessments Module stores structured reports done by Professional Organizations.',
'The Request this record is associated with.': 'The Request this record is associated with.',
'The Role this person plays within this Office/Project.': 'The Role this person plays within this Office/Project.',
'The Role this person plays within this hospital.': 'The Role this person plays within this hospital.',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.",
'The Shelter this Request is from (optional).': 'The Shelter this Request is from (optional).',
'The Source this information came from.': 'The Source this information came from.',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The area is ': 'The area is ',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The category of the Item.': 'The category of the Item.',
'The contact person for this organization.': 'The contact person for this organization.',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The default policy for data import from this peer.': 'The default policy for data import from this peer.',
'The descriptive name of the peer.': 'The descriptive name of the peer.',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following modules are available': 'The following modules are available',
'The hospital this record is associated with.': 'The hospital this record is associated with.',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.',
'The language to use for notifications.': 'The language to use for notifications.',
'The last known location of the missing person before disappearance.': 'The last known location of the missing person before disappearance.',
'The length is ': 'The length is ',
'The list of Item categories are maintained by the Administrators.': 'The list of Item categories are maintained by the Administrators.',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The person reporting about the missing person.': 'The person reporting about the missing person.',
"The person's manager within this Office/Project.": "The person's manager within this Office/Project.",
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'The post variable on the URL used for sending messages',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The request this record is associated with.': 'The request this record is associated with.',
'The scanned copy of this document.': 'The scanned copy of this document.',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The title of the WMS Browser panel in the Tools panel.': 'The title of the WMS Browser panel in the Tools panel.',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': 'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.',
'The unique identifier which identifies this instance to other instances.': 'The unique identifier which identifies this instance to other instances.',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Theme',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There are multiple records at this location': 'There are multiple records at this location',
'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.',
'These are settings for Inbound Mail.': 'These are settings for Inbound Mail.',
'These are the default settings for all users. To change settings just for you, click ': 'These are the default settings for all users. To change settings just for you, click ',
'They': 'They',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': 'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).',
'This form allows the administrator to remove a duplicate location.': 'This form allows the administrator to remove a duplicate location.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'This is the way to transfer data between machines as it maintains referential integrity.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This might be due to a temporary overloading or maintenance of the server.': 'This might be due to a temporary overloading or maintenance of the server.',
'This module assists the management of fatalities and the identification of the deceased.': 'This module assists the management of fatalities and the identification of the deceased.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'This page shows you logs of past syncs. Click on the link below to go to this page.',
'This screen allows you to upload a collection of photos to the server.': 'This screen allows you to upload a collection of photos to the server.',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Ticket Details',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Ticket deleted',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Time needed to collect water': 'Time needed to collect water',
'Time of Request': 'Time of Request',
'Timestamp': 'Timestamp',
'Title': 'Title',
'To begin the sync process, click the button on the right => ': 'To begin the sync process, click the button on the right => ',
'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ',
'To delete': 'To delete',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for a person, enter any of the first, middle or last names and/or the ID label of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search for a person, enter any of the first, middle or last names and/or the ID label of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.",
'To submit a new job, use the': 'To submit a new job, use the',
'To variable': 'To variable',
'Tools': 'Tools',
'Tornado': 'Tornado',
'Total # of Beneficiaries Reached ': 'Total # of Beneficiaries Reached ',
'Total # of Target Beneficiaries': 'Total # of Target Beneficiaries',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Cost per Megabyte': 'Total Cost per Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Households': 'Total Households',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total No of Affectees (Including Students, Teachers & Others)': 'Total No of Affectees (Including Students, Teachers & Others)',
'Total No of Female Affectees (Including Students, Teachers & Others)': 'Total No of Female Affectees (Including Students, Teachers & Others)',
'Total No of Male Affectees (Including Students, Teachers & Others)': 'Total No of Male Affectees (Including Students, Teachers & Others)',
'Total No of Students (Primary To Higher Secondary) in the Total Affectees': 'Total No of Students (Primary To Higher Secondary) in the Total Affectees',
'Total No of Teachers & Other Govt Servants in the Total Affectees': 'Total No of Teachers & Other Govt Servants in the Total Affectees',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Total Recurring Costs',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Total number of beds in this hospital. Automatically updated from daily reports.',
'Total number of houses in the area': 'Total number of houses in the area',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'ٹاؤن',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Tracing': 'Tracing',
'Track': 'Track',
'Track Details': 'Track Details',
'Track deleted': 'Track deleted',
'Track updated': 'Track updated',
'Track uploaded': 'Track uploaded',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks': 'Tracks',
'Tracks requests for aid and matches them against donors who have pledged aid': 'Tracks requests for aid and matches them against donors who have pledged aid',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Tracks the location, distibution, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Transit': 'Transit',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transport': 'نقل و حمل',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tree': 'Tree',
'Tropical Storm': 'Tropical Storm',
'Truck': 'Truck',
'Try checking the URL for errors, maybe it was mistyped.': 'Try checking the URL for errors, maybe it was mistyped.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Try hitting refresh/reload button or trying the URL from the address bar again.',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Type': 'قسم',
'Type of cause': 'Type of cause',
'Type of latrines': 'Type of latrines',
'Type of place for defecation': 'Type of place for defecation',
'Type of water source before the disaster': 'Type of water source before the disaster',
'Types of health services available': 'Types of health services available',
'Types of water storage containers available': 'Types of water storage containers available',
'UID': 'UID',
'URL': 'URL',
'UTC Offset': 'UTC Offset',
'Unable to parse CSV file!': 'Unable to parse CSV file!',
'Unidentified': 'Unidentified',
'Union Council': 'یونین کونسل',
'Unit': 'Unit',
'Unit Bed Capacity': 'Unit Bed Capacity',
'Unit Cost': 'Unit Cost',
'Unit Details': 'Unit Details',
'Unit Name': 'Unit Name',
'Unit Set': 'Unit Set',
'Unit Short Code for e.g. m for meter.': 'Unit Short Code for e.g. m for meter.',
'Unit added': 'Unit added',
'Unit deleted': 'Unit deleted',
'Unit updated': 'Unit updated',
'Units': 'Units',
'Units of Measure': 'Units of Measure',
'Unknown': 'Unknown',
'Unknown Peer': 'Unknown Peer',
'Unknown type of facility': 'Unknown type of facility',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unsent': 'Unsent',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'اپ ڈیٹ',
'Update Activity Report': 'Update Activity Report',
'Update Import Job': 'Update Import Job',
'Update Request': 'Update Request',
'Update Service Profile': 'Update Service Profile',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update if Master': 'Update if Master',
'Update if Newer': 'Update if Newer',
'Update your current ordered list': 'Update your current ordered list',
'Update/Master': 'Update/Master',
'Update/Newer': 'Update/Newer',
'Upload': 'اپ لوڈ کریں',
'Upload Photos': 'Upload Photos',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Track': 'Upload Track',
'Upload a Spreadsheet': 'Upload a Spreadsheet',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'Use default': 'Use default',
'Use default from feature class': 'Use default from feature class',
'Use these links to download data that is currently in the database.': 'Use these links to download data that is currently in the database.',
'Use this space to add a description about the Bin Type.': 'Use this space to add a description about the Bin Type.',
'Use this space to add a description about the site location.': 'Use this space to add a description about the site location.',
'Use this space to add a description about the warehouse/site.': 'Use this space to add a description about the warehouse/site.',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Use this space to add additional comments and notes about the Site/Warehouse.',
'Use this to indicate that the person has been found.': 'Use this to indicate that the person has been found.',
'User': 'صارف',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User Details': 'User Details',
'User ID': 'User ID',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'User Requests',
'User Updated': 'User Updated',
'User added': 'User added',
'User deleted': 'User deleted',
'User updated': 'User updated',
'Username': 'Username',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.',
'Users': 'Users',
'Users removed': 'Users removed',
'Ushahidi': 'Ushahidi',
'Usual food sources in the area': 'Usual food sources in the area',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Vehicle Crime',
'Vehicle Types': 'Vehicle Types',
'Vendor': 'Vendor',
'Verified': 'Verified',
'Verified?': 'Verified?',
'Verify Password': 'توثیق کریں پاس ورڈ',
'Version': 'Version',
'Very High': 'Very High',
'View Alerts received using either Email or SMS': 'View Alerts received using either Email or SMS',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View On Map': 'نقشہ پر ملاحظہ کریں',
'View Outbox': 'View Outbox',
'View Requests for Aid': 'View Requests for Aid',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
"View and/or update details of the person's record": "View and/or update details of the person's record",
'View or update the status of a hospital.': 'View or update the status of a hospital.',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'View the hospitals on a map.',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": "View/Edit the Database directly (caution: doesn't respect the framework rules!)",
'Village': 'گرام',
'Village Leader': 'گاؤں رہنما',
'Visible?': 'Visible?',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Volcanic Ash Cloud',
'Volcanic Event': 'Volcanic Event',
'Volume - Fluids': 'Volume - Fluids',
'Volume - Solids': 'Volume - Solids',
'Volume Capacity': 'Volume Capacity',
'Volume/Dimensions': 'Volume/Dimensions',
'Volunteer Data': 'Volunteer Data',
'Volunteer Project': 'Volunteer Project',
'Volunteer Registration': 'Volunteer Registration',
'Volunteer Registrations': 'Volunteer Registrations',
'Volunteer Request': 'Volunteer Request',
'Volunteer registration added': 'Volunteer registration added',
'Volunteer registration deleted': 'Volunteer registration deleted',
'Volunteer registration updated': 'Volunteer registration updated',
'Volunteers': 'رضاکار',
'Vote': 'Vote',
'Votes': 'Votes',
'WMS Browser Name': 'WMS Browser Name',
'WMS Browser URL': 'WMS Browser URL',
'Walking Only': 'Walking Only',
'Walking time to the health service': 'Walking time to the health service',
'Warehouse': 'ہاؤس',
'Warehouse Management': 'Warehouse Management',
'Warehouse/Sites Registry': 'Warehouse/Sites Registry',
'Warehouses': 'Warehouses',
'WatSan': 'پانی اور صفائی',
'Water': 'پانی',
'Water gallon': 'Water gallon',
'Water storage containers available for HH': 'Water storage containers available for HH',
'Water storage containers sufficient per HH': 'Water storage containers sufficient per HH',
'Waterspout': 'Waterspout',
'Way Bill(s)': 'Way Bill(s)',
'We have tried': 'We have tried',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weekly': 'Weekly',
'Weight': 'وزن',
'Weight (kg)': 'Weight (kg)',
'Welcome to the Sahana Eden Disaster Management System': 'Welcome to the Sahana Eden Disaster Management System',
'Welcome to the Sahana Portal at ': 'Welcome to the Sahana Portal at ',
'Well-Known Text': 'Well-Known Text',
'Were basic medical supplies available for health services prior to the disaster?': 'Were basic medical supplies available for health services prior to the disaster?',
'Were breast milk substitutes used prior to the disaster?': 'Were breast milk substitutes used prior to the disaster?',
'Were there cases of malnutrition in this area prior to the disaster?': 'Were there cases of malnutrition in this area prior to the disaster?',
'Were there health services functioning for the community prior to the disaster?': 'Were there health services functioning for the community prior to the disaster?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': 'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?',
'What are the factors affecting school attendance?': 'What are the factors affecting school attendance?',
"What are the people's normal ways to obtain food in this area?": "What are the people's normal ways to obtain food in this area?",
'What are your main sources of cash to restart your business?': 'What are your main sources of cash to restart your business?',
'What are your main sources of income now?': 'What are your main sources of income now?',
'What do you spend most of your income on now?': 'What do you spend most of your income on now?',
'What food stocks exist? (main dishes)': 'What food stocks exist? (main dishes)',
'What food stocks exist? (side dishes)': 'What food stocks exist? (side dishes)',
'What is the estimated total number of people in all of these institutions?': 'What is the estimated total number of people in all of these institutions?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?',
'What is your major source of drinking water?': 'What is your major source of drinking water?',
"What should be done to reduce women and children's vulnerability to violence?": "What should be done to reduce women and children's vulnerability to violence?",
'What type of latrines are available in the village/IDP centre/Camp?': 'What type of latrines are available in the village/IDP centre/Camp?',
'What type of salvage material can be used from destroyed houses?': 'What type of salvage material can be used from destroyed houses?',
'What type of salvage material can be used from destroyed schools?': 'What type of salvage material can be used from destroyed schools?',
'What types of health problems do children currently have?': 'What types of health problems do children currently have?',
'What types of health problems do people currently have?': 'What types of health problems do people currently have?',
'What types of health services are still functioning in the affected area?': 'What types of health services are still functioning in the affected area?',
'What types of household water storage containers are available?': 'What types of household water storage containers are available?',
'What were your main sources of income before the disaster?': 'What were your main sources of income before the disaster?',
'Wheat': 'گندم کا عطیہ',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.",
'Where are the alternative places for studying?': 'Where are the alternative places for studying?',
'Where are the separated children originally from?': 'Where are the separated children originally from?',
'Where do the majority of people defecate?': 'Where do the majority of people defecate?',
'Where have the children been sent?': 'Where have the children been sent?',
'Where is solid waste disposed in the village/camp?': 'Where is solid waste disposed in the village/camp?',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'کون کر رہا ہے کیا اور کہاں',
'Who usually collects water for the family?': 'Who usually collects water for the family?',
'Width': 'چوڑائی',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': 'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women participating in coping activities': 'Women participating in coping activities',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Working hours end',
'Working hours start': 'Working hours start',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Would you like to display the photos on the map?': 'Would you like to display the photos on the map?',
'X-Ray': 'X-Ray',
'Yes': 'Yes',
'You are currently reported missing!': 'You are currently reported missing!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.',
'You can click on the map below to select the Lat/Lon fields:': 'You can click on the map below to select the Lat/Lon fields:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.',
'You can select the Draw tool (': 'You can select the Draw tool (',
'You can set the modem settings for SMS here.': 'You can set the modem settings for SMS here.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ",
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must provide a series id to proceed.': 'You must provide a series id to proceed.',
'Your action is required. Please approve user %s asap: ': 'Your action is required. Please approve user %s asap: ',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Your post was added successfully.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.',
'Yourself': 'اپنے آپ کو',
'ZIP/Postcode': 'ZIP/Postcode',
'Zinc roof': 'Zinc roof',
'Zoom': 'زوم',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle',
'Zoom Levels': 'Zoom Levels',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle',
'Zoom to maximum map extent': 'Zoom to maximum map extent',
'accepted': 'accepted',
'act': 'act',
'active': 'active',
'added': 'added',
'all records': 'all records',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'allows for creation and management of surveys to assess the damage following a natural disaster.',
'approved': 'approved',
'assigned': 'assigned',
'average': 'average',
'black': 'black',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'can be used to extract data from spreadsheets and put them into database tables.',
'can use this to identify the Location': 'can use this to identify the Location',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'collateral event': 'collateral event',
'completed': 'completed',
'consider': 'consider',
'constraint_id': 'constraint_id',
'criminal intent': 'criminal intent',
'critical': 'critical',
'crud': 'crud',
'curly': 'curly',
'currently registered': 'currently registered',
'daily': 'daily',
'dark': 'dark',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'db': 'db',
'deferred': 'deferred',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'denied': 'denied',
'description': 'description',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'edit': 'edit',
'editor': 'editor',
'embedded': 'embedded',
'enclosed area': 'enclosed area',
'export as csv file': 'export as csv file',
'fat': 'fat',
'feedback': 'feedback',
'female': 'مادہ',
'final report': 'final report',
'flush latrine with septic tank': 'flush latrine with septic tank',
'follow-up assessment': 'follow-up assessment',
'forehead': 'forehead',
'form data': 'form data',
'from_id': 'from_id',
'full': 'full',
'getting': 'getting',
'green': 'green',
'grey': 'grey',
'here': 'here',
'high': 'high',
'highly critical': 'highly critical',
'hourly': 'hourly',
'households': 'households',
'human error': 'human error',
'identified': 'identified',
'ignore': 'ignore',
'immediately': 'فوری طور پر',
'improvement': 'improvement',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'in GPS format',
'inactive': 'inactive',
'initial assessment': 'initial assessment',
'injured': 'injured',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'invalid': 'invalid',
'invalid request': 'invalid request',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.',
'is an online bulletin board of missing and found people. It captures information about the people missing and found, as well as information of the person seeking them, increasing the chances of people finding each other. For example if two members of a family unit is looking for the head of the family, we can use this data at least to connect those two family members.': 'is an online bulletin board of missing and found people. It captures information about the people missing and found, as well as information of the person seeking them, increasing the chances of people finding each other. For example if two members of a family unit is looking for the head of the family, we can use this data at least to connect those two family members.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.',
'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'latrines': 'latrines',
'legend URL': 'legend URL',
'light': 'light',
'locations': 'locations',
'login': 'لاگ ان',
'long': 'long',
'long>12cm': 'long>12cm',
'low': 'low',
'male': 'مرد',
'manual': 'manual',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'maxResolution',
'medium': 'medium',
'medium<12cm': 'medium<12cm',
'menu item': 'menu item',
'message_id': 'message_id',
'meters': 'meters',
'module allows the site administrator to configure various options.': 'module allows the site administrator to configure various options.',
'module helps monitoring the status of hospitals.': 'module helps monitoring the status of hospitals.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).',
'mongoloid': 'mongoloid',
'more': 'مزید',
'natural hazard': 'natural hazard',
'negroid': 'negroid',
'never': 'never',
'new': 'نیا',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'non-critical': 'non-critical',
'none': 'کوئی بھی',
'normal': 'normal',
'not specified': 'not specified',
'num Zoom Levels': 'num Zoom Levels',
'once': 'once',
'open defecation': 'open defecation',
'operational intent': 'operational intent',
'or import from csv file': 'or import from csv file',
'other': 'دیگر',
'over one hour': 'over one hour',
'people': 'people',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'primary incident': 'primary incident',
'provides a catalogue of digital media.': 'provides a catalogue of digital media.',
'record does not exist': 'record does not exist',
'record id': 'record id',
'records deleted': 'records deleted',
'red': 'red',
'reports successfully imported.': 'reports successfully imported.',
'retired': 'retired',
'retry': 'retry',
'review': 'review',
'river': 'river',
'secondary effect': 'secondary effect',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'separated from family',
'shaved': 'shaved',
'shift_end': 'shift_end',
'shift_start': 'shift_start',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'اپ ابھی سائن اپ کریں',
'simple': 'simple',
'single': 'single',
'slim': 'slim',
'state': 'state',
'straight': 'straight',
'suffered financial losses': 'suffered financial losses',
'table': 'table',
'table_name': 'table_name',
'tall': 'tall',
'technical failure': 'technical failure',
'this': 'this',
'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. Sorry.',
'to access the system': 'نظام تک رسائی کے لئے',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'to_id': 'to_id',
'tonsure': 'tonsure',
'total': 'total',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'unable to parse csv file': 'unable to parse csv file',
'unapproved': 'unapproved',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'uninhabitable = foundation and structure destroyed': 'uninhabitable = foundation and structure destroyed',
'unknown': 'نامعلوم',
'unspecified': 'منسوب نہیں کیا گیا',
'updated': 'updated',
'updates only': 'updates only',
'urgent': 'urgent',
'vm_action': 'vm_action',
'wavy': 'wavy',
'weekly': 'weekly',
'white': 'white',
'widowed': 'widowed',
'window': 'window',
'windows broken, cracks in walls, roof slightly damaged': 'windows broken, cracks in walls, roof slightly damaged',
'wish': 'wish',
'within human habitat': 'within human habitat',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'جی ہاں',
}
|
ksetyadi/Sahana-Eden
|
languages/ur.py
|
Python
|
mit
| 228,256
|
[
"VisIt"
] |
ac882c75737ee58aa02ac3a95994e29ac3aa75c8e036ef3c03e255d3268e3455
|
# import vtk wrapped version that will raise exceptions for error events
import vtkwithexceptions as vtk
import base64
import os
import tempfile
from celery import Celery
from celery import task, current_task
from celery.result import AsyncResult
from visomics.vtk.common import parse_json
celery = Celery()
celery.config_from_object('celeryconfig')
@celery.task
def run(input):
    """Celery task entry point for executing a Visomics R job.

    Parses a JSON task description and dispatches it to ``execute``.

    Parameters
    ----------
    input : str
        JSON document describing the job; expected to contain the keys
        ``'inputs'``, ``'outputs'`` and ``'script'`` (schema defined by
        the caller / ``parse_json`` — confirm against the producer).

    Returns
    -------
    Whatever ``execute`` returns for this task description.
    """
    # NOTE: the parameter is named `input` (shadows the builtin) — kept
    # as-is for backward compatibility with existing callers.
    task_description = parse_json(input)  # removed stray trailing semicolon
    return execute(task_description['inputs'], task_description['outputs'],
                   task_description['script'])
def execute(inputs, outputs, script):
    """Run an R script over VTK tables/trees via vtkRCalculatorFilter.

    inputs: iterable of (name, type, format, value) tuples, where ``value``
        is base64-encoded binary VTK table/tree data.
    outputs: iterable of dicts with 'name' and 'type' keys ('Table'/'Tree').
    script: R source to execute.
    Returns a dict {'output': [{'name', 'type', 'data'}, ...]} where 'data'
    is the base64-encoded binary VTK serialization of each result.
    """
    # prepend some R code to the beginning of the script
    # this allows us to capture useful error output information
    script = "con <- file(\"/tmp/R_errors.txt\", \"w\")\nsink(con, type=\"message\")\n" + script
    # Setup the pipeline
    rcalc = vtk.vtkRCalculatorFilter()
    rcalc.SetRscript(script)
    readers = []
    input_tables = []  # names of 'Table' inputs, in order
    input_trees = []   # names of 'Tree' inputs, in order
    for name, type, format, value in inputs:
        # NOTE(review): a type that is neither 'Table' nor 'Tree' reuses the
        # previous iteration's reader (or raises NameError on the first
        # iteration) -- presumably inputs are validated upstream; confirm.
        if type == 'Table':
            input_tables.append(name)
            input = vtk.vtkTableReader()
        elif type == 'Tree':
            input_trees.append(name)
            input = vtk.vtkTreeReader()
        # Decode the payload and feed it to the reader entirely in memory.
        data_value = base64.b64decode(value)
        input.SetBinaryInputString(data_value, len(data_value))
        input.SetReadFromInputString(1)
        readers.append(input)
    # Declare to the R calculator the variable names the tables map to.
    names = vtk.vtkStringArray()
    names.SetNumberOfComponents(1)
    names.SetNumberOfTuples(len(input_tables))
    index = 0
    for name in input_tables:
        names.SetValue(index, name)
        index += 1
    rcalc.PutTables(names)
    # ... and likewise for the trees.
    names = vtk.vtkStringArray()
    names.SetNumberOfComponents(1)
    names.SetNumberOfTuples(len(input_trees))
    index = 0
    for name in input_trees:
        names.SetValue(index, name)
        index += 1
    rcalc.PutTrees(names)
    # Multi inputs
    if len(inputs) > 1:
        # Group all readers into one multi-block dataset feeding rcalc.
        input = vtk.vtkMultiBlockDataGroupFilter()
        for reader in readers:
            input.AddInputConnection(reader.GetOutputPort())
        rcalc.AddInputConnection(input.GetOutputPort())
    # NOTE(review): in the single-input case no reader is ever connected to
    # rcalc here -- verify whether that is handled elsewhere or a missing
    # connection.
    output_tables = []
    output_trees = []
    for o in outputs:
        if o['type'] == 'Table':
            output_tables.append(o['name'])
        elif o['type'] == 'Tree':
            output_trees.append(o['name'])
    # R variable names to pull back out as tables...
    names = vtk.vtkStringArray()
    names.SetNumberOfComponents(1)
    names.SetNumberOfTuples(len(output_tables))
    index = 0
    for name in output_tables:
        names.SetValue(index, name)
        index += 1
    rcalc.GetTables(names)
    # ... and as trees.
    names = vtk.vtkStringArray()
    names.SetNumberOfComponents(1)
    names.SetNumberOfTuples(len(output_trees))
    index = 0
    for name in output_trees:
        names.SetValue(index, name)
        index += 1
    rcalc.GetTrees(names)
    try:
        # Execute the pipeline; the wrapped vtk raises on VTK error events.
        rcalc.Update()
        output = rcalc.GetOutput()
    except vtk.ErrorEventException:
        # Surface the R-side stderr captured by the sink() prologue above.
        f = file("/tmp/R_errors.txt", "r")
        error_msg = f.read()
        raise Exception(error_msg)
    print str(output)
    output_dataobjects = []
    if len(outputs) > 1:
        # Multiple outputs arrive as a composite dataset; flatten it in
        # iterator order, which is assumed to match ``outputs`` order.
        iter = output.NewIterator();
        iter.InitTraversal()
        while not iter.IsDoneWithTraversal():
            dataobject = iter.GetCurrentDataObject()
            output_dataobjects.append(dataobject)
            iter.GoToNextItem()
        iter.FastDelete()
    else:
        output_dataobjects.append(output)
    output_json = {'output': []}
    index = 0
    for dataobject in output_dataobjects:
        output = outputs[index]
        index += 1
        if output['type'] == 'Table':
            writer = vtk.vtkTableWriter()
        elif output['type'] == 'Tree':
            writer = vtk.vtkTreeWriter()
        # Serialize through a temp file, then base64 the binary contents.
        tmp = tempfile.mktemp()
        writer.SetFileName(tmp)
        writer.SetFileTypeToBinary()
        writer.SetInputData(dataobject)
        writer.Update()
        with open(tmp, 'r') as fp:
            data = fp.read()
        os.remove(tmp)
        data = base64.b64encode(data)
        output_json['output'].append({'name': output['name'], 'type': output['type'], 'data': data})
    return output_json
|
Visomics/Visomics
|
AnalysisServer/visomics/vtk/r.py
|
Python
|
apache-2.0
| 4,173
|
[
"VTK"
] |
aca3c76d8c60cd5c5ce9f26c30369d5755ccbf8f919d495b22582ceec99fde51
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Michael Rabbitt.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Warning: URL formats of "raw" gists are undocummented and subject to change.
# See also: https://developer.github.com/v3/gists/
#
# Inspired by "[Python] reStructuredText GitHub Gist directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
"""
Extension to Python Markdown for Embedded Gists (gist.github.com).
Basic Example:
Text of the gist:
[:gist: 4747847]
Example with filename:
Text of the gist:
[:gist: 4747847 zen.py]
Basic Example with hexidecimal id:
Text of the gist:
[:gist: c4a43d6fdce612284ac0]
Example with hexidecimal id filename:
Text of the gist:
[:gist: c4a43d6fdce612284ac0 cow.txt]
Example using reStructuredText syntax:
Text of the gist:
.. gist:: 4747847 zen.py
Example using hexidecimal ID with reStructuredText syntax:
Text of the gist:
.. gist:: c4a43d6fdce612284ac0
Example using hexidecimal ID and filename with reStructuredText syntax:
Text of the gist:
.. gist:: c4a43d6fdce612284ac0 cow.txt
Error Case: non-existent Gist ID:
Text of the gist:
[:gist: 0]
Error Case: non-existent file:
Text of the gist:
[:gist: 4747847 doesntexist.py]
"""
import requests
from nikola.plugin_categories import MarkdownExtension
from nikola.utils import get_logger
try:
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import AtomicString
from markdown.util import etree
except ImportError:
# No need to catch this, if you try to use this without Markdown,
# the markdown compiler will fail first
Extension = Pattern = object
LOGGER = get_logger('compile_markdown.mdx_gist')
GIST_JS_URL = "https://gist.github.com/{0}.js"
GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}"
GIST_RAW_URL = "https://gist.githubusercontent.com/raw/{0}"
GIST_FILE_RAW_URL = "https://gist.githubusercontent.com/raw/{0}/{1}"
GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'
GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>[^\]\s]+)(?:\s*(?P<filename>.+?))?\s*$'
class GistFetchException(Exception):
    """Raised when fetching a Gist's content from github.com fails."""

    def __init__(self, url, status_code):
        """Record the failing URL and HTTP status code in *message*."""
        super(GistFetchException, self).__init__()
        self.message = ('Received a {0} response from Gist URL: {1}'
                        .format(status_code, url))
class GistPattern(Pattern):
    """Inline pattern that expands gist markers into embed markup."""

    def __init__(self, pattern, configs):
        """Compile *pattern*; *configs* is accepted for API compatibility."""
        Pattern.__init__(self, pattern)

    def _fetch(self, url):
        """Fetch *url*, raising GistFetchException on a non-OK response."""
        resp = requests.get(url)
        if not resp.ok:
            raise GistFetchException(url, resp.status_code)
        return resp.text

    def get_raw_gist_with_filename(self, gist_id, filename):
        """Return the raw text of a single file within a gist."""
        return self._fetch(GIST_FILE_RAW_URL.format(gist_id, filename))

    def get_raw_gist(self, gist_id):
        """Return the raw text of a whole gist."""
        return self._fetch(GIST_RAW_URL.format(gist_id))

    def handleMatch(self, m):
        """Build a <div class="gist"> holding the embed <script> plus a
        <noscript> fallback containing the raw gist source."""
        gist_id = m.group('gist_id')
        gist_file = m.group('filename')
        container = etree.Element('div')
        container.set('class', 'gist')
        script = etree.SubElement(container, 'script')
        noscript = etree.SubElement(container, 'noscript')
        try:
            if gist_file:
                script.set('src', GIST_FILE_JS_URL.format(
                    gist_id, gist_file))
                raw = self.get_raw_gist_with_filename(gist_id, gist_file)
            else:
                script.set('src', GIST_JS_URL.format(gist_id))
                raw = self.get_raw_gist(gist_id)
            # Insert the source as <pre/> within <noscript>; AtomicString
            # keeps Markdown from re-processing the gist's contents.
            pre = etree.SubElement(noscript, 'pre')
            pre.text = AtomicString(raw)
        except GistFetchException as e:
            # Keep the page building: log, and leave a comment in the output.
            LOGGER.warning(e.message)
            noscript.append(etree.Comment(' WARNING: {0} '.format(e.message)))
        return container
class GistExtension(MarkdownExtension, Extension):
    """Gist extension for Markdown."""

    def __init__(self, configs=None):
        """Initialize the extension.

        Args:
            configs: optional iterable of (key, value) pairs overriding
                the extension defaults.
        """
        # set extension defaults
        self.config = {}
        # Override defaults with user settings.  Use None as the default
        # rather than a mutable ``{}`` literal (shared across all calls);
        # ``or []`` also tolerates an explicit None argument.
        for key, value in (configs or []):
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals=None):
        """Register the Markdown and reST gist patterns on *md*."""
        gist_md_pattern = GistPattern(GIST_MD_RE, self.getConfigs())
        gist_md_pattern.md = md
        md.inlinePatterns.register(gist_md_pattern, 'gist', 175)
        gist_rst_pattern = GistPattern(GIST_RST_RE, self.getConfigs())
        gist_rst_pattern.md = md
        md.inlinePatterns.register(gist_rst_pattern, 'gist-rst', 176)
        md.registerExtension(self)
def makeExtension(configs=None):  # pragma: no cover
    """Entry point used by python-markdown to build the extension."""
    extension = GistExtension(configs)
    return extension
if __name__ == '__main__':
    # Run the doctest examples embedded in the module docstring.
    import doctest
    doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
                                 doctest.REPORT_NDIFF))
|
getnikola/nikola
|
nikola/plugins/compile/markdown/mdx_gist.py
|
Python
|
mit
| 6,603
|
[
"Brian"
] |
5ff66fe292b49b57c1fb45c0b577f42cd38920f393d02c09ca0bc4acd5f76828
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements methods for writing LAMMPS input files.
"""
import os
import re
import shutil
import warnings
from string import Template
from monty.json import MSONable
from pymatgen.io.lammps.data import LammpsData
__author__ = "Kiran Mathew, Brandon Wood, Zhi Deng"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "1.0"
__maintainer__ = "Zhi Deng"
__email__ = "z4deng@eng.ucsd.edu"
__date__ = "Aug 1, 2018"
class LammpsRun(MSONable):
    """
    Examples for various simple LAMMPS runs with a given simulation box,
    force field and a few more settings.  Experienced LAMMPS users should
    consider using the write_lammps_inputs method with more sophisticated
    templates.
    """

    # Location of the input-script templates bundled with the package.
    template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")

    def __init__(self, script_template, settings, data, script_filename):
        """
        Base constructor.

        Args:
            script_template (str): String template for the input script,
                with placeholders of the form '$variable_name', e.g.
                '$temperature'.
            settings (dict): Values substituted into the placeholders,
                e.g. {'temperature': 1}.
            data (LammpsData or str): Data file as a LammpsData instance
                or path to an existing data file.  Only relevant when a
                read_data cmd appears in the script.
            script_filename (str): Filename for the input script.
        """
        self.script_template = script_template
        self.settings = settings
        self.data = data
        self.script_filename = script_filename

    def write_inputs(self, output_dir, **kwargs):
        """
        Write all input files (input script, and data file if needed).
        Other supporting files are not handled at this moment.

        Args:
            output_dir (str): Directory to output the input files.
            **kwargs: kwargs supported by LammpsData.write_file.
        """
        write_lammps_inputs(
            output_dir,
            self.script_template,
            settings=self.settings,
            data=self.data,
            script_filename=self.script_filename,
            **kwargs,
        )

    @classmethod
    def md(cls, data, force_field, temperature, nsteps, other_settings=None):
        r"""
        Example of a simple MD run based on the template md.txt.

        Args:
            data (LammpsData or str): Data file as a LammpsData instance
                or path to an existing data file.
            force_field (str): Combined force-field related cmds, e.g.
                'pair_style eam\npair_coeff * * Cu_u3.eam'.
            temperature (float): Simulation temperature.
            nsteps (int): Number of steps to run.
            other_settings (dict): Other settings to be filled into
                placeholders.
        """
        with open(os.path.join(cls.template_dir, "md.txt")) as f:
            script_template = f.read()
        settings = {} if other_settings is None else other_settings.copy()
        settings["force_field"] = force_field
        settings["temperature"] = temperature
        settings["nsteps"] = nsteps
        return cls(
            script_template=script_template,
            settings=settings,
            data=data,
            script_filename="in.md",
        )
def write_lammps_inputs(
    output_dir,
    script_template,
    settings=None,
    data=None,
    script_filename="in.lammps",
    make_dir_if_not_present=True,
    **kwargs,
):
    """
    Writes input files for a LAMMPS run. Input script is constructed
    from a str template with placeholders to be filled by custom
    settings. Data file is either written from a LammpsData
    instance or copied from an existing file if read_data cmd is
    inspected in the input script. Other supporting files are not
    handled at the moment.

    Args:
        output_dir (str): Directory to output the input files.
        script_template (str): String template for input script with
            placeholders. The format for placeholders has to be
            '$variable_name', e.g., '$temperature'
        settings (dict): Contains values to be written to the
            placeholders, e.g., {'temperature': 1}. Default to None.
        data (LammpsData or str): Data file as a LammpsData instance or
            path to an existing data file. Default to None, i.e., no
            data file supplied. Useful only when read_data cmd is in
            the script.
        script_filename (str): Filename for the input script.
        make_dir_if_not_present (bool): Set to True if you want the
            directory (and the whole path) to be created if it is not
            present.
        **kwargs: kwargs supported by LammpsData.write_file.

    Examples:
        >>> eam_template = '''units           metal
        ... atom_style      atomic
        ...
        ... lattice         fcc 3.615
        ... region          box block 0 20 0 20 0 20
        ... create_box      1 box
        ... create_atoms    1 box
        ...
        ... pair_style      eam
        ... pair_coeff      1 1 Cu_u3.eam
        ...
        ... velocity        all create $temperature 376847 loop geom
        ...
        ... neighbor        1.0 bin
        ... neigh_modify    delay 5 every 1
        ...
        ... fix             1 all nvt temp $temperature $temperature 0.1
        ...
        ... timestep        0.005
        ...
        ... run             $nsteps'''
        >>> write_lammps_inputs('.', eam_template, settings={'temperature': 1600.0, 'nsteps': 100})
    """
    variables = {} if settings is None else settings
    template = Template(script_template)
    # safe_substitute leaves unknown placeholders untouched rather than
    # raising, so partial settings dicts are tolerated.
    input_script = template.safe_substitute(**variables)
    if make_dir_if_not_present:
        # exist_ok avoids the race between an existence check and creation.
        os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, script_filename), "w") as f:
        f.write(input_script)
    # Do not require a trailing newline: a read_data cmd on the very last
    # line of the script (no terminating "\n") must still be detected.
    read_data = re.search(r"read_data\s+(.*)", input_script)
    if read_data:
        # First whitespace-separated token after read_data is the filename.
        data_filename = read_data.group(1).split()[0]
        if isinstance(data, LammpsData):
            data.write_file(os.path.join(output_dir, data_filename), **kwargs)
        elif isinstance(data, str) and os.path.exists(data):
            shutil.copyfile(data, os.path.join(output_dir, data_filename))
        else:
            warnings.warn("No data file supplied. Skip writing %s." % data_filename)
|
vorwerkc/pymatgen
|
pymatgen/io/lammps/inputs.py
|
Python
|
mit
| 7,388
|
[
"LAMMPS",
"pymatgen"
] |
1f24afa7a4d90a40e74ccf880bcb5d0670c85a098f3f26d28810f95ed767826f
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
It has been moved to a separate repository:
https://github.com/hmmlearn/hmmlearn
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17 "
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
def normalize(A, axis=None):
    """ Normalize the input array so that it sums to 1.

    WARNING: The HMM module and its functions will be removed in 0.17
    as it no longer falls within the project's scope and API.

    Parameters
    ----------
    A: array, shape (n_samples, n_features)
        Non-normalized input data
    axis: int
        dimension along which normalization is performed

    Returns
    -------
    normalized_A: array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis

    WARNING: Modifies inplace the array
    """
    # Nudge all entries away from exact zero before summing.
    A += np.finfo(float).eps
    Asum = A.sum(axis)
    # BUG FIX: the original ``if axis`` test is falsy for axis=0, which
    # silently skipped both the zero-sum guard and the keepdims-style
    # reshape for that axis; compare against None explicitly instead.
    if axis is not None and A.ndim > 1:
        # Make sure we don't divide by zero.
        Asum[Asum == 0] = 1
        shape = list(A.shape)
        shape[axis] = 1
        Asum.shape = shape
    return A / Asum
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emmission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emmission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
    def _do_mstep(self, stats, params):
        """M-step: re-estimate startprob_ / transmat_ from `stats`.

        Applies a Dirichlet-style prior (prior - 1 pseudo-counts) and
        floors counts at 1e-20 to keep normalize() well-defined.
        """
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0
        if 's' in params:
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian emissions.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Parameters
    ----------
    n_components : int
        Number of states.

    ``_covariance_type`` : string
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.

    Attributes
    ----------
    ``_covariance_type`` : string
        String describing the type of covariance parameters used by
        the model.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_features : int
        Dimensionality of the Gaussian emissions.

    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    means : array, shape (`n_components`, `n_features`)
        Mean parameters for each state.

    covars : array
        Covariance parameters for each state.  The shape depends on
        ``_covariance_type``::

            (`n_components`,)                          if 'spherical',
            (`n_features`, `n_features`)               if 'tied',
            (`n_components`, `n_features`)             if 'diag',
            (`n_components`, `n_features`, `n_features`) if 'full'

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars.
        Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars.  Defaults to all parameters.

    Examples
    --------
    >>> from sklearn.hmm import GaussianHMM
    >>> GaussianHMM(n_components=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...

    See Also
    --------
    GMM : Gaussian mixture model
    """
    def __init__(self, n_components=1, covariance_type='diag', startprob=None,
                 transmat=None, startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          thresh=thresh, params=params,
                          init_params=init_params)
        self._covariance_type = covariance_type
        # Validate after assignment so an informative attribute exists
        # even when the error is raised.
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type')
        self.means_prior = means_prior
        self.means_weight = means_weight
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight
    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _get_means(self):
        """Mean parameters for each state."""
        return self._means_
    def _set_means(self, means):
        # Copy to decouple from the caller's array; once n_features is
        # known the shape is enforced.
        means = np.asarray(means)
        if (hasattr(self, 'n_features')
                and means.shape != (self.n_components, self.n_features)):
            raise ValueError('means must have shape '
                             '(n_components, n_features)')
        self._means_ = means.copy()
        self.n_features = self._means_.shape[1]
    means_ = property(_get_means, _set_means)
    def _get_covars(self):
        """Return covars as a full matrix (one per state)."""
        if self._covariance_type == 'full':
            return self._covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self._covars_]
        elif self._covariance_type == 'tied':
            # Single shared covariance, replicated per state.
            return [self._covars_] * self.n_components
        elif self._covariance_type == 'spherical':
            return [np.eye(self.n_features) * f for f in self._covars_]
    def _set_covars(self, covars):
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_components)
        self._covars_ = covars.copy()
    covars_ = property(_get_covars, _set_covars)
    def _compute_log_likelihood(self, obs):
        # Per-frame log N(obs | mean_k, covar_k) for every state k.
        return log_multivariate_normal_density(
            obs, self._means_, self._covars_, self._covariance_type)
    def _generate_sample_from_state(self, state, random_state=None):
        # 'tied' stores one shared covariance; all other types store one
        # covariance per state.
        if self._covariance_type == 'tied':
            cv = self._covars_
        else:
            cv = self._covars_[state]
        return sample_gaussian(self._means_[state], cv, self._covariance_type,
                               random_state=random_state)
    def _init(self, obs, params='stmc'):
        """Initialize means by k-means and covariances from the data."""
        super(GaussianHMM, self)._init(obs, params=params)
        if (hasattr(self, 'n_features')
                and self.n_features != obs[0].shape[1]):
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (obs[0].shape[1],
                                              self.n_features))
        self.n_features = obs[0].shape[1]
        if 'm' in params:
            # K-means on the first sequence provides the initial means.
            self._means_ = cluster.KMeans(
                n_clusters=self.n_components).fit(obs[0]).cluster_centers_
        if 'c' in params:
            cv = np.cov(obs[0].T)
            if not cv.shape:
                # 1-D data: np.cov returns a scalar; promote to (1, 1).
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self._covariance_type, self.n_components)
            # Avoid exact zeros, which would make the density singular.
            self._covars_[self._covars_ == 0] = 1e-5
    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
                                       self.n_features))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'm' in params or 'c' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)
        if 'c' in params:
            if self._covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self._covariance_type in ('tied', 'full'):
                # Full second moments require the outer product per frame.
                for t, o in enumerate(obs):
                    obsobsT = np.outer(o, o)
                    for c in range(self.n_components):
                        stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
    def _do_mstep(self, stats, params):
        """M-step for means/covariances with MAP-style priors."""
        super(GaussianHMM, self)._do_mstep(stats, params)
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            prior = self.means_prior
            weight = self.means_weight
            if prior is None:
                weight = 0
                prior = 0
            self._means_ = (weight * prior + stats['obs']) / (weight + denom)
        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0
            means_prior = self.means_prior
            means_weight = self.means_weight
            if means_prior is None:
                means_weight = 0
                means_prior = 0
            meandiff = self._means_ - means_prior
            if self._covariance_type in ('spherical', 'diag'):
                cv_num = (means_weight * (meandiff) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                # Floor the denominator to keep the update finite when a
                # state received (almost) no posterior mass.
                self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
                if self._covariance_type == 'spherical':
                    # Spherical = average of the per-dimension variances.
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_components, self.n_features,
                                  self.n_features))
                for c in range(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])
                    cvnum[c] = (means_weight * np.outer(meandiff[c],
                                                        meandiff[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight + stats['post'][:, None, None]))
    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
        more training data, or increasing covars_prior.
        """
        return super(GaussianHMM, self).fit(obs)
class MultinomialHMM(_BaseHMM):
    """Hidden Markov Model with multinomial (discrete) emissions.

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    n_symbols : int
        Number of possible symbols emitted by the model (in the observations).

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    emissionprob : array, shape (`n_components`, `n_symbols`)
        Probability of emitting a given symbol when in each state.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.

    Examples
    --------
    >>> from sklearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_components=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with multinomial emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)
    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        return np.exp(self._log_emissionprob)
    def _set_emissionprob(self, emissionprob):
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_components, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_components, n_symbols)')
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(emissionprob):
            # BUGFIX: the original code discarded normalize()'s return
            # value, so the re-normalized probabilities were never used.
            emissionprob = normalize(emissionprob)
        self._log_emissionprob = np.log(emissionprob)
        # Any remaining log(0) becomes NaN under old numpy semantics;
        # map it to -inf explicitly.
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]
    emissionprob_ = property(_get_emissionprob, _set_emissionprob)
    def _compute_log_likelihood(self, obs):
        # Fancy-index the per-state log emission probabilities by the
        # observed symbol at each frame.
        return self._log_emissionprob[:, obs].T
    def _generate_sample_from_state(self, state, random_state=None):
        """Draw one symbol from `state`'s emission distribution (inverse CDF)."""
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        return symbol
    def _init(self, obs, params='ste'):
        """Initialize emissions to random distributions; infer n_symbols."""
        super(MultinomialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)
        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                # Alphabet size = number of distinct symbols seen.
                symbols = set()
                for o in obs:
                    symbols = symbols.union(set(o))
                self.n_symbols = len(symbols)
            emissionprob = normalize(self.random_state.rand(self.n_components,
                                                            self.n_symbols), 1)
            self.emissionprob_ = emissionprob
    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
        # Expected symbol-emission counts per state.
        stats['obs'] = np.zeros((self.n_components, self.n_symbols))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'e' in params:
            for t, symbol in enumerate(obs):
                stats['obs'][:, symbol] += posteriors[t]
    def _do_mstep(self, stats, params):
        super(MultinomialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            # Row-normalize the expected emission counts.
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])
    def _check_input_symbols(self, obs):
        """Check that `obs` is usable for MultinomialHMM.fit.

        Input must be an array of non-negative integers whose sorted
        distinct values are contiguous (no gaps).
        e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
        """
        symbols = np.asarray(obs).flatten()
        if symbols.dtype.kind != 'i':
            # input symbols must be integer
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains negative integer
            return False
        symbols.sort()
        if np.any(np.diff(symbols) > 1):
            # input is discontinuous
            return False
        return True
    def fit(self, obs, **kwargs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.
        """
        err_msg = ("Input must be both positive integer array and "
                   "every element must be continuous, but %s was given.")
        if not self._check_input_symbols(obs):
            raise ValueError(err_msg % obs)
        return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions.

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    gmms : array of GMM objects, length `n_components`
        GMM emission distributions for each state.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    init_params : string, optional
        Controls which parameters are initialized prior to training.
        Can contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights. Defaults to
        all parameters.

    params : string, optional
        Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat,
        'm' for means, and 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.

    Examples
    --------
    >>> from sklearn.hmm import GMMHMM
    >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with GMM emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        self.gmms = gmms
        if gmms is None:
            # Default: one freshly constructed GMM per hidden state.
            gmms = []
            for x in range(self.n_components):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms
    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _compute_log_likelihood(self, obs):
        # One column of per-frame log-likelihoods per state's GMM.
        return np.array([g.score(obs) for g in self.gmms_]).T
    def _generate_sample_from_state(self, state, random_state=None):
        return self.gmms_[state].sample(1, random_state=random_state).flatten()
    def _init(self, obs, params='stwmc'):
        """Initialize every per-state GMM on the pooled observations."""
        super(GMMHMM, self)._init(obs, params=params)
        allobs = np.concatenate(obs, 0)
        for g in self.gmms_:
            # n_iter=0: only the GMM's init step runs, no EM iterations.
            g.set_params(init_params=params, n_iter=0)
            g.fit(allobs)
    def _initialize_sufficient_statistics(self):
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
        stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
        stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        for state, g in enumerate(self.gmms_):
            # Weight the GMM's own responsibilities by the HMM posterior
            # of this state (eps avoids log(0)).
            _, lgmm_posteriors = g.score_samples(obs)
            lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
                                      + np.finfo(np.float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)
            # A scratch GMM runs one weighted M-step without touching g.
            tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
            n_features = g.means_.shape[1]
            tmp_gmm._set_covars(
                distribute_covar_matrix_to_match_covariance_type(
                    np.eye(n_features), g.covariance_type,
                    g.n_components))
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError
            stats['norm'][state] += norm
            if 'm' in params:
                stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.covariance_type == 'tied':
                    stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm.covars_.ndim)
                    shape[0] = np.shape(tmp_gmm.covars_)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm.covars_ * cvnorm
    def _do_mstep(self, stats, params):
        super(GMMHMM, self)._do_mstep(stats, params)
        # All that is left to do is to apply covars_prior to the
        # parameters updated in _accumulate_sufficient_statistics.
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state] +
                                     self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/hmm.py
|
Python
|
apache-2.0
| 48,579
|
[
"Gaussian"
] |
83b3306da180c3c33024c0c04a504fc6c7a471ca8037256697ed23703f426fef
|
import unittest
from DIRAC.Core.Workflow.Module import ModuleDefinition
from DIRAC.Core.Workflow.Step import StepDefinition
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Workflow.Utilities.Utils import getStepDefinition, getStepCPUTimes
#############################################################################
class UtilitiesTestCase( unittest.TestCase ):
  """ Base class for the Workflow Utilities tests.

  Creates a fresh Job instance before every test so tests cannot leak
  state into one another.
  """
  def setUp( self ):
    # Redundant trailing `pass` removed; the assignment is the body.
    self.job = Job()
class UtilsSuccess( UtilitiesTestCase ):
  """Success-path tests for DIRAC.Workflow.Utilities.Utils helpers."""
  def test__getStepDefinition( self ):
    """getStepDefinition must rebuild the same StepDefinition that is
    assembled by hand here, with and without extra step parameters."""
    importLine = """
from DIRAC.Workflow.Modules.<MODULE> import <MODULE>
"""
    # modules
    gaudiApp = ModuleDefinition( 'Script' )
    body = importLine.replace( '<MODULE>', 'Script' )
    gaudiApp.setDescription( getattr( __import__( "%s.%s" % ( 'DIRAC.Workflow.Modules', 'Script' ),
                                                  globals(), locals(), ['__doc__'] ),
                                      "__doc__" ) )
    gaudiApp.setBody( body )
    genBKReport = ModuleDefinition( 'FailoverRequest' )
    body = importLine.replace( '<MODULE>', 'FailoverRequest' )
    genBKReport.setDescription( getattr( __import__( "%s.%s" % ( 'DIRAC.Workflow.Modules', 'FailoverRequest' ),
                                                     globals(), locals(), ['__doc__'] ),
                                         "__doc__" ) )
    genBKReport.setBody( body )
    # step
    appDefn = StepDefinition( 'App_Step' )
    appDefn.addModule( gaudiApp )
    appDefn.createModuleInstance( 'Script', 'Script' )
    appDefn.addModule( genBKReport )
    appDefn.createModuleInstance( 'FailoverRequest', 'FailoverRequest' )
    appDefn.addParameterLinked( gaudiApp.parameters )
    stepDef = getStepDefinition( 'App_Step', ['Script', 'FailoverRequest'] )
    # Compare via str(): StepDefinition has no structural __eq__.
    self.assert_( str( appDefn ) == str( stepDef ) )
    self.job._addParameter( appDefn, 'name', 'type', 'value', 'desc' )
    self.job._addParameter( appDefn, 'name1', 'type1', 'value1', 'desc1' )
    stepDef = getStepDefinition( 'App_Step', ['Script', 'FailoverRequest'],
                                 parametersList = [[ 'name', 'type', 'value', 'desc' ],
                                                   [ 'name1', 'type1', 'value1', 'desc1' ]] )
    self.assert_( str( appDefn ) == str( stepDef ) )
  def test_getStepCPUTimes( self ):
    """getStepCPUTimes returns (0, 0) for an empty step description."""
    execT, cpuT = getStepCPUTimes( {} )
    self.assertEqual( execT, 0 )
    self.assertEqual( cpuT, 0 )
    # NOTE(review): this second call is only smoke-tested (printed, not
    # asserted) — Python 2 print statement.
    execT, cpuT = getStepCPUTimes( {'StartTime':0, 'StartStats': ( 0, 0, 0, 0, 0 )} )
    print execT, cpuT
# Assemble and run the full suite when executed as a script.
if __name__ == '__main__':
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( UtilitiesTestCase )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( UtilsSuccess ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
avedaee/DIRAC
|
Workflow/Utilities/test/Test_Utilities.py
|
Python
|
gpl-3.0
| 2,812
|
[
"DIRAC"
] |
a3257f2498a891b32b9738b014994653e71aea3ab99f392cc6c9a41b9045cd68
|
import unittest
from test import test_support
from itertools import *
from weakref import proxy
import sys
import operator
import random
# Platform ssize_t bounds, used by the overflow-behaviour tests below.
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
    """Test helper: a one-argument function (doubles its input)."""
    doubled = 2*x
    return doubled
def errfunc(*args):
    """Test helper: raises ValueError no matter what it is called with."""
    raise ValueError()
def gen3():
    """Non-restartable source sequence: yields 0, 1, 2 exactly once."""
    yield 0
    yield 1
    yield 2
def isEven(x):
    """Test predicate: true when x is even."""
    remainder = x % 2
    return remainder == 0
def isOdd(x):
    """Test predicate: true when x is odd."""
    remainder = x % 2
    return remainder == 1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
    """Return the first n items of seq as a list.

    Safe on infinite iterables because islice stops after n items.
    """
    prefix = islice(seq, n)
    return list(prefix)
def prod(iterable):
    """Multiply all elements together; 1 for an empty iterable.

    Left fold with initial value 1, matching reduce(operator.mul, it, 1).
    """
    result = 1
    for factor in iterable:
        result = result * factor
    return result
def fact(n):
    """Factorial of n (product of 1..n; 1 when n <= 0)."""
    total = 1
    for k in range(2, n + 1):
        total *= k
    return total
class TestBasicOps(unittest.TestCase):
    def test_chain(self):
        """chain() must behave like the documented pure-Python version."""
        def chain2(*iterables):
            'Pure python version in the docs'
            for it in iterables:
                for element in it:
                    yield element
        # Run identical assertions against the C and Python versions.
        for c in (chain, chain2):
            self.assertEqual(list(c('abc', 'def')), list('abcdef'))
            self.assertEqual(list(c('abc')), list('abc'))
            self.assertEqual(list(c('')), [])
            self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
            self.assertRaises(TypeError, list,c(2, 3))
    def test_chain_from_iterable(self):
        """chain.from_iterable flattens one level and is lazy."""
        self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
        self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
        self.assertEqual(list(chain.from_iterable([''])), [])
        self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
        # Non-iterable inner elements raise TypeError on consumption.
        self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
    def test_combinations(self):
        """combinations(): argument errors, exact output, invariants, and
        equivalence with two pure-Python reference implementations."""
        self.assertRaises(TypeError, combinations, 'abc')        # missing r argument
        self.assertRaises(TypeError, combinations, 'abc', 2, 1)  # too many arguments
        self.assertRaises(TypeError, combinations, None)        # pool is not iterable
        self.assertRaises(ValueError, combinations, 'abc', -2)  # r is negative
        self.assertRaises(ValueError, combinations, 'abc', 32)  # r is too big
        self.assertEqual(list(combinations(range(4), 3)),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
        def combinations1(iterable, r):
            'Pure python version shown in the docs'
            pool = tuple(iterable)
            n = len(pool)
            indices = range(r)
            yield tuple(pool[i] for i in indices)
            while 1:
                for i in reversed(range(r)):
                    if indices[i] != i + n - r:
                        break
                else:
                    return
                indices[i] += 1
                for j in range(i+1, r):
                    indices[j] = indices[j-1] + 1
                yield tuple(pool[i] for i in indices)
        def combinations2(iterable, r):
            'Pure python version shown in the docs'
            pool = tuple(iterable)
            n = len(pool)
            for indices in permutations(range(n), r):
                if sorted(indices) == list(indices):
                    yield tuple(pool[i] for i in indices)
        # Exhaustive invariants for all (n, r) with n < 7.
        for n in range(7):
            values = [5*x-12 for x in range(n)]
            for r in range(n+1):
                result = list(combinations(values, r))
                self.assertEqual(len(result), fact(n) / fact(r) / fact(n-r)) # right number of combs
                self.assertEqual(len(result), len(set(result)))         # no repeats
                self.assertEqual(result, sorted(result))                # lexicographic order
                for c in result:
                    self.assertEqual(len(c), r)                         # r-length combinations
                    self.assertEqual(len(set(c)), r)                    # no duplicate elements
                    self.assertEqual(list(c), sorted(c))                # keep original ordering
                    self.assert_(all(e in values for e in c))           # elements taken from input iterable
                    self.assertEqual(list(c),
                                     [e for e in values if e in c])      # comb is a subsequence of the input iterable
                self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
                self.assertEqual(result, list(combinations2(values, r))) # matches first pure python version
        # Test implementation detail:  tuple re-use
        self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
        self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
    def test_permutations(self):
        """permutations(): argument errors, exact output, invariants, and
        equivalence with two pure-Python reference implementations."""
        self.assertRaises(TypeError, permutations)              # too few arguments
        self.assertRaises(TypeError, permutations, 'abc', 2, 1)  # too many arguments
        self.assertRaises(TypeError, permutations, None)        # pool is not iterable
        self.assertRaises(ValueError, permutations, 'abc', -2)  # r is negative
        self.assertRaises(ValueError, permutations, 'abc', 32)  # r is too big
        self.assertRaises(TypeError, permutations, 'abc', 's')  # r is not an int or None
        self.assertEqual(list(permutations(range(3), 2)),
                         [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
        def permutations1(iterable, r=None):
            'Pure python version shown in the docs'
            pool = tuple(iterable)
            n = len(pool)
            r = n if r is None else r
            indices = range(n)
            cycles = range(n, n-r, -1)
            yield tuple(pool[i] for i in indices[:r])
            while n:
                for i in reversed(range(r)):
                    cycles[i] -= 1
                    if cycles[i] == 0:
                        indices[i:] = indices[i+1:] + indices[i:i+1]
                        cycles[i] = n - i
                    else:
                        j = cycles[i]
                        indices[i], indices[-j] = indices[-j], indices[i]
                        yield tuple(pool[i] for i in indices[:r])
                        break
                else:
                    return
        def permutations2(iterable, r=None):
            'Pure python version shown in the docs'
            pool = tuple(iterable)
            n = len(pool)
            r = n if r is None else r
            for indices in product(range(n), repeat=r):
                if len(set(indices)) == r:
                    yield tuple(pool[i] for i in indices)
        # Exhaustive invariants for all (n, r) with n < 7.
        for n in range(7):
            values = [5*x-12 for x in range(n)]
            for r in range(n+1):
                result = list(permutations(values, r))
                self.assertEqual(len(result), fact(n) / fact(n-r))      # right number of perms
                self.assertEqual(len(result), len(set(result)))         # no repeats
                self.assertEqual(result, sorted(result))                # lexicographic order
                for p in result:
                    self.assertEqual(len(p), r)                         # r-length permutations
                    self.assertEqual(len(set(p)), r)                    # no duplicate elements
                    self.assert_(all(e in values for e in p))           # elements taken from input iterable
                self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
                self.assertEqual(result, list(permutations2(values, r))) # matches first pure python version
                if r == n:
                    self.assertEqual(result, list(permutations(values, None))) # test r as None
                    self.assertEqual(result, list(permutations(values)))       # test default r
        # Test implementation detail:  tuple re-use
        self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
        self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
    def test_count(self):
        """count(): basic counting, arg validation, maxint boundaries, repr.

        Python 2 semantics throughout: zip() returns a list, iterators
        expose .next(), and sys.maxint exists.
        """
        self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
        self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
        self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
        self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
        self.assertRaises(TypeError, count, 2, 3)
        self.assertRaises(TypeError, count, 'a')
        # Counting across the int/long boundary must not break.
        self.assertEqual(list(islice(count(maxsize-5), 10)), range(maxsize-5, maxsize+5))
        self.assertEqual(list(islice(count(-maxsize-5), 10)), range(-maxsize-5, -maxsize+5))
        c = count(3)
        self.assertEqual(repr(c), 'count(3)')
        c.next()
        self.assertEqual(repr(c), 'count(4)')
        c = count(-9)
        self.assertEqual(repr(c), 'count(-9)')
        c.next()
        self.assertEqual(c.next(), -8)
        for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
            # Test repr (ignoring the L in longs)
            r1 = repr(count(i)).replace('L', '')
            r2 = 'count(%r)'.__mod__(i).replace('L', '')
            self.assertEqual(r1, r2)
    def test_cycle(self):
        """cycle(): repeats elements forever, empty input yields nothing."""
        self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
        self.assertEqual(list(cycle('')), [])
        self.assertRaises(TypeError, cycle)
        self.assertRaises(TypeError, cycle, 5)
        # Works on a one-shot generator: elements are cached internally.
        self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
    def test_groupby(self):
        """groupby(): argument handling, normal/nested grouping, unused
        inner iterators, pipes-and-filters idioms, and error propagation
        from the source iterator, __cmp__, and the key function."""
        # Check whether it accepts arguments correctly
        self.assertEqual([], list(groupby([])))
        self.assertEqual([], list(groupby([], key=id)))
        self.assertRaises(TypeError, list, groupby('abc', []))
        self.assertRaises(TypeError, groupby, None)
        self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
        # Check normal input
        s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
             (2,15,22), (3,16,23), (3,17,23)]
        dup = []
        for k, g in groupby(s, lambda r:r[0]):
            for elem in g:
                self.assertEqual(k, elem[0])
                dup.append(elem)
        self.assertEqual(s, dup)
        # Check nested case
        dup = []
        for k, g in groupby(s, lambda r:r[0]):
            for ik, ig in groupby(g, lambda r:r[2]):
                for elem in ig:
                    self.assertEqual(k, elem[0])
                    self.assertEqual(ik, elem[2])
                    dup.append(elem)
        self.assertEqual(s, dup)
        # Check case where inner iterator is not used
        keys = [k for k, g in groupby(s, lambda r:r[0])]
        expectedkeys = set([r[0] for r in s])
        self.assertEqual(set(keys), expectedkeys)
        self.assertEqual(len(keys), len(expectedkeys))
        # Exercise pipes and filters style
        s = 'abracadabra'
        # sort s | uniq
        r = [k for k, g in groupby(sorted(s))]
        self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
        # sort s | uniq -d
        r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
        self.assertEqual(r, ['a', 'b', 'r'])
        # sort s | uniq -c
        r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
        self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
        # sort s | uniq -c | sort -rn | head -3
        r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
        self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
        # iter.next failure
        class ExpectedError(Exception):
            pass
        def delayed_raise(n=0):
            # Yields n items, then raises.
            for i in range(n):
                yield 'yo'
            raise ExpectedError
        def gulp(iterable, keyp=None, func=list):
            # Drain groupby fully, applying func to each group.
            return [func(g) for k, g in groupby(iterable, keyp)]
        # iter.next failure on outer object
        self.assertRaises(ExpectedError, gulp, delayed_raise(0))
        # iter.next failure on inner object
        self.assertRaises(ExpectedError, gulp, delayed_raise(1))
        # __cmp__ failure
        class DummyCmp:
            def __cmp__(self, dst):
                raise ExpectedError
        s = [DummyCmp(), DummyCmp(), None]
        # __cmp__ failure on outer object
        self.assertRaises(ExpectedError, gulp, s, func=id)
        # __cmp__ failure on inner object
        self.assertRaises(ExpectedError, gulp, s)
        # keyfunc failure
        def keyfunc(obj):
            if keyfunc.skip > 0:
                keyfunc.skip -= 1
                return obj
            else:
                raise ExpectedError
        # keyfunc failure on outer object
        keyfunc.skip = 0
        self.assertRaises(ExpectedError, gulp, [None], keyfunc)
        keyfunc.skip = 1
        self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
    def test_ifilter(self):
        """ifilter(): predicate filtering, None/bool as identity predicate,
        infinite input via count(), and argument-type errors."""
        self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
        self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
        self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
        self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
        self.assertRaises(TypeError, ifilter)
        self.assertRaises(TypeError, ifilter, lambda x:x)
        self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, ifilter, isEven, 3)
        # non-callable predicate fails on first .next(), not at construction
        self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
    def test_ifilterfalse(self):
        """ifilterfalse(): mirror of test_ifilter with the predicate inverted."""
        self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
        self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
        self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
        self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
        self.assertRaises(TypeError, ifilterfalse)
        self.assertRaises(TypeError, ifilterfalse, lambda x:x)
        self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, ifilterfalse, isEven, 3)
        # non-callable predicate fails on first .next(), not at construction
        self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
    def test_izip(self):
        """izip(): agreement with builtin zip(), truncation to shortest
        input, argument errors, and the result-tuple re-use optimization."""
        ans = [(x,y) for x, y in izip('abc',count())]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
        self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
        self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
        self.assertEqual(list(izip('abcdef')), zip('abcdef'))
        self.assertEqual(list(izip()), zip())
        self.assertRaises(TypeError, izip, 3)
        self.assertRaises(TypeError, izip, range(3), 3)
        # Check tuple re-use (implementation detail)
        self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
                         zip('abc', 'def'))
        self.assertEqual([pair for pair in izip('abc', 'def')],
                         zip('abc', 'def'))
        # all yielded tuples are the same object when consumed lazily...
        ids = map(id, izip('abc', 'def'))
        self.assertEqual(min(ids), max(ids))
        # ...but distinct objects once materialized in a list
        ids = map(id, list(izip('abc', 'def')))
        self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = map(None, *args)
self.assertEqual(list(izip_longest(*args)), target)
self.assertEqual(list(izip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
self.assertEqual(list(izip_longest()), zip())
self.assertEqual(list(izip_longest([])), zip([]))
self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
self.assertEqual(list(izip_longest('abc', 'defg', **{})), map(None, 'abc', 'defg')) # empty keyword dict
self.assertRaises(TypeError, izip_longest, 3)
self.assertRaises(TypeError, izip_longest, range(3), 3)
for stmt in [
"izip_longest('abc', fv=1)",
"izip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise Type in: ' + stmt)
# Check tuple re-use (implementation detail)
self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
ids = map(id, izip_longest('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip_longest('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
    def test_product(self):
        """product(): fixed cases, the repeat keyword, and randomized
        cross-checks against two pure-Python reference implementations."""
        for args, result in [
            ([], [()]),                     # zero iterables
            (['ab'], [('a',), ('b',)]),     # one iterable
            ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]),     # two iterables
            ([range(0), range(2), range(3)], []),           # first iterable with zero length
            ([range(2), range(0), range(3)], []),           # middle iterable with zero length
            ([range(2), range(3), range(0)], []),           # last iterable with zero length
            ]:
            self.assertEqual(list(product(*args)), result)
            for r in range(4):
                # repeat=r is equivalent to repeating the argument list r times
                self.assertEqual(list(product(*(args*r))),
                                 list(product(*args, **dict(repeat=r))))
        self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
        self.assertRaises(TypeError, product, range(6), None)
        def product1(*args, **kwds):
            # Reference implementation: odometer-style index counting
            pools = map(tuple, args) * kwds.get('repeat', 1)
            n = len(pools)
            if n == 0:
                yield ()
                return
            if any(len(pool) == 0 for pool in pools):
                return
            indices = [0] * n
            yield tuple(pool[i] for pool, i in zip(pools, indices))
            while 1:
                for i in reversed(range(n)):  # right to left
                    if indices[i] == len(pools[i]) - 1:
                        continue
                    indices[i] += 1
                    for j in range(i+1, n):
                        indices[j] = 0
                    yield tuple(pool[i] for pool, i in zip(pools, indices))
                    break
                else:
                    return
        def product2(*args, **kwds):
            'Pure python version used in docs'
            pools = map(tuple, args) * kwds.get('repeat', 1)
            result = [[]]
            for pool in pools:
                result = [x+[y] for x in result for y in pool]
            for prod in result:
                yield tuple(prod)
        argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
                    set('abcdefg'), range(11), tuple(range(13))]
        for i in range(100):
            # random mixes of iterable types, up to 4 arguments
            args = [random.choice(argtypes) for j in range(random.randrange(5))]
            expected_len = prod(map(len, args))
            self.assertEqual(len(list(product(*args))), expected_len)
            self.assertEqual(list(product(*args)), list(product1(*args)))
            self.assertEqual(list(product(*args)), list(product2(*args)))
            args = map(iter, args)
            self.assertEqual(len(list(product(*args))), expected_len)
        # Test implementation detail:  tuple re-use
        self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
        self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
    def test_repeat(self):
        """repeat(): finite and infinite repetition, zero/negative counts,
        argument errors, and the repr() countdown behavior."""
        self.assertEqual(zip(xrange(3),repeat('a')),
                         [(0, 'a'), (1, 'a'), (2, 'a')])
        self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
        self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
        self.assertEqual(list(repeat('a', 0)), [])
        self.assertEqual(list(repeat('a', -3)), [])   # negative count acts like zero
        self.assertRaises(TypeError, repeat)
        self.assertRaises(TypeError, repeat, None, 3, 4)
        self.assertRaises(TypeError, repeat, None, 'a')
        r = repeat(1+0j)
        self.assertEqual(repr(r), 'repeat((1+0j))')
        r = repeat(1+0j, 5)
        self.assertEqual(repr(r), 'repeat((1+0j), 5)')
        # repr shows the remaining count after the iterator is exhausted
        list(r)
        self.assertEqual(repr(r), 'repeat((1+0j), 0)')
    def test_imap(self):
        """imap(): multi-argument mapping, None as identity (tuple-packing,
        Python 2 only), truncation to shortest input, and error propagation."""
        self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(imap(None, 'abc', range(5))),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(list(imap(None, 'abc', count())),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(take(2,imap(None, 'abc', count())),
                         [('a',0),('b',1)])
        self.assertEqual(list(imap(operator.pow, [])), [])
        self.assertRaises(TypeError, imap)
        self.assertRaises(TypeError, imap, operator.neg)
        # non-callable function / bad function fail lazily on .next()
        self.assertRaises(TypeError, imap(10, range(5)).next)
        self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
        self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
    def test_starmap(self):
        """starmap(): argument-tuple unpacking, iterables of iterables,
        and error propagation from the function or a bad element."""
        self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(starmap(operator.pow, [])), [])
        # each element only needs to be iterable, not a tuple
        self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
        self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
        self.assertRaises(TypeError, starmap)
        self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
        # failures surface lazily on .next()
        self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
        self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
        self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
    def test_islice(self):
        """islice(): agreement with range() slicing, early-exhaustion,
        stop=None variants, consumption accounting (SF #1171417), and
        rejection of negative/non-integer arguments."""
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(islice(xrange(100), *args)), range(*args))
        for args, tgtargs in [  # Stop when seqn is exhausted
                ((10, 110, 3), ((10, 100, 3))),
                ((10, 110), ((10, 100))),
                ((110,), (100,))
                ]:
            self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
        # Test stop=None
        self.assertEqual(list(islice(xrange(10), None)), range(10))
        self.assertEqual(list(islice(xrange(10), None, None)), range(10))
        self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
        self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
        self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
        # Test number of items consumed     SF #1171417
        it = iter(range(10))
        self.assertEqual(list(islice(it, 3)), range(3))
        self.assertEqual(list(it), range(3, 10))   # only 3 items were consumed
        # Test invalid arguments
        self.assertRaises(TypeError, islice, xrange(10))
        self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
        self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
        self.assertRaises(ValueError, islice, xrange(10), 'a')
        self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
        self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
        self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
        # maxsize (sys.maxsize) must be accepted as a stop value
        self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
    def test_takewhile(self):
        """takewhile(): stops at the first false predicate and stays stopped."""
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        underten = lambda x: x<10
        self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
        self.assertEqual(list(takewhile(underten, [])), [])
        self.assertRaises(TypeError, takewhile)
        self.assertRaises(TypeError, takewhile, operator.pow)
        self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
        # non-callable / failing predicate surfaces lazily on .next()
        self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
        self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
        t = takewhile(bool, [1, 1, 1, 0, 0, 0])
        self.assertEqual(list(t), [1, 1, 1])
        self.assertRaises(StopIteration, t.next)   # exhausted for good
    def test_dropwhile(self):
        """dropwhile(): skips the leading true-predicate prefix, then passes
        everything through unchanged."""
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        underten = lambda x: x<10
        self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
        self.assertEqual(list(dropwhile(underten, [])), [])
        self.assertRaises(TypeError, dropwhile)
        self.assertRaises(TypeError, dropwhile, operator.pow)
        self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
        # non-callable / failing predicate surfaces lazily on .next()
        self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
        self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
    def test_tee(self):
        """tee(): independent iterators over one source -- interleaving
        patterns, deallocation of one branch, multi-way splits, the n
        argument, pass-through of copyable iterators, direct instantiation
        of the tee type, and weak-reference support."""
        n = 200
        def irange(n):
            # one-shot generator equivalent of range(n)
            for i in xrange(n):
                yield i
        a, b = tee([])        # test empty iterator
        self.assertEqual(list(a), [])
        self.assertEqual(list(b), [])
        a, b = tee(irange(n)) # test 100% interleaved
        self.assertEqual(zip(a,b), zip(range(n),range(n)))
        a, b = tee(irange(n)) # test 0% interleaved
        self.assertEqual(list(a), range(n))
        self.assertEqual(list(b), range(n))
        a, b = tee(irange(n)) # test dealloc of leading iterator
        for i in xrange(100):
            self.assertEqual(a.next(), i)
        del a
        self.assertEqual(list(b), range(n))
        a, b = tee(irange(n)) # test dealloc of trailing iterator
        for i in xrange(100):
            self.assertEqual(a.next(), i)
        del b
        self.assertEqual(list(a), range(100, n))
        for j in xrange(5):   # test randomly interleaved
            order = [0]*n + [1]*n
            random.shuffle(order)
            lists = ([], [])
            its = tee(irange(n))
            for i in order:
                value = its[i].next()
                lists[i].append(value)
            self.assertEqual(lists[0], range(n))
            self.assertEqual(lists[1], range(n))
        # test argument format checking
        self.assertRaises(TypeError, tee)
        self.assertRaises(TypeError, tee, 3)
        self.assertRaises(TypeError, tee, [1,2], 'x')
        self.assertRaises(TypeError, tee, [1,2], 3, 'x')
        # tee object should be instantiable
        a, b = tee('abc')
        c = type(a)('def')
        self.assertEqual(list(c), list('def'))
        # test long-lagged and multi-way split
        a, b, c = tee(xrange(2000), 3)
        for i in xrange(100):
            self.assertEqual(a.next(), i)
        self.assertEqual(list(b), range(2000))
        self.assertEqual([c.next(), c.next()], range(2))
        self.assertEqual(list(a), range(100,2000))
        self.assertEqual(list(c), range(2,2000))
        # test values of n
        self.assertRaises(TypeError, tee, 'abc', 'invalid')
        self.assertRaises(ValueError, tee, [], -1)
        for n in xrange(5):
            result = tee('abc', n)
            self.assertEqual(type(result), tuple)
            self.assertEqual(len(result), n)
            self.assertEqual(map(list, result), [list('abc')]*n)
        # tee pass-through to copyable iterator
        a, b = tee('abc')
        c, d = tee(a)
        self.assert_(a is c)   # tee of a tee returns the same object
        # test tee_new
        t1, t2 = tee('abc')
        tnew = type(t1)
        self.assertRaises(TypeError, tnew)
        self.assertRaises(TypeError, tnew, 10)
        t3 = tnew(t1)
        self.assert_(list(t1) == list(t2) == list(t3) == list('abc'))
        # test that tee objects are weak referencable
        a, b = tee(xrange(10))
        p = proxy(a)
        self.assertEqual(getattr(p, '__class__'), type(b))
        del a
        # proxy target is gone once the only strong reference is dropped
        self.assertRaises(ReferenceError, getattr, p, '__class__')
    def test_StopIteration(self):
        """Every itertools iterator raises StopIteration once exhausted,
        for both empty inputs and immediately-stopping sources (StopNow)."""
        self.assertRaises(StopIteration, izip().next)
        for f in (chain, cycle, izip, groupby):
            self.assertRaises(StopIteration, f([]).next)
            self.assertRaises(StopIteration, f(StopNow()).next)
        self.assertRaises(StopIteration, islice([], None).next)
        self.assertRaises(StopIteration, islice(StopNow(), None).next)
        p, q = tee([])
        self.assertRaises(StopIteration, p.next)
        self.assertRaises(StopIteration, q.next)
        p, q = tee(StopNow())
        self.assertRaises(StopIteration, p.next)
        self.assertRaises(StopIteration, q.next)
        self.assertRaises(StopIteration, repeat(None, 0).next)
        for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
            self.assertRaises(StopIteration, f(lambda x:x, []).next)
            self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
    """Verify the worked examples from the itertools documentation produce
    exactly the outputs shown there.

    Fix: test_stapmap was a typo of test_starmap; renamed for consistency
    with the other per-function test names (still discovered by unittest
    via the test_ prefix).
    """
    def test_chain(self):
        self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
    def test_chain_from_iterable(self):
        self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
    def test_combinations(self):
        self.assertEqual(list(combinations('ABCD', 2)),
                         [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        self.assertEqual(list(combinations(range(4), 3)),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
    def test_count(self):
        self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
    def test_cycle(self):
        self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
    def test_dropwhile(self):
        self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
    def test_groupby(self):
        self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
                         list('ABCDAB'))
        self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
                         [list('AAAA'), list('BBB'), list('CC'), list('D')])
    def test_ifilter(self):
        self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
    def test_ifilterfalse(self):
        self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
    def test_imap(self):
        self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
    def test_islice(self):
        self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
    def test_izip(self):
        self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
    def test_izip_longest(self):
        self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
                         [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
    def test_permutations(self):
        self.assertEqual(list(permutations('ABCD', 2)),
                         map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
        self.assertEqual(list(permutations(range(3))),
                         [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
    def test_product(self):
        self.assertEqual(list(product('ABCD', 'xy')),
                         map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
        self.assertEqual(list(product(range(2), repeat=3)),
                         [(0,0,0), (0,0,1), (0,1,0), (0,1,1),
                          (1,0,0), (1,0,1), (1,1,0), (1,1,1)])
    def test_repeat(self):
        self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
        self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
                         [32, 9, 1000])
    def test_takewhile(self):
        self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
    """Check that every itertools object participates in cyclic garbage
    collection: each test threads an iterator through a container that the
    iterator itself references, forming a cycle that only the GC can free."""
    def makecycle(self, iterator, container):
        # container -> iterator -> container cycle; advancing once forces
        # the iterator to hold a reference to its input before both names
        # are dropped.
        container.append(iterator)
        iterator.next()
        del container, iterator
    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)
    def test_chain_from_iterable(self):
        a = []
        self.makecycle(chain.from_iterable([a]), a)
    def test_combinations(self):
        a = []
        self.makecycle(combinations([1,2,a,3], 3), a)
    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)
    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)
    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)
    def test_issue2246(self):
        # Issue 2246 -- the _grouper iterator was not included in GC
        n = 10
        keyfunc = lambda x: x
        for i, j in groupby(xrange(n), key=keyfunc):
            keyfunc.__dict__.setdefault('x',[]).append(j)
    def test_ifilter(self):
        a = []
        self.makecycle(ifilter(lambda x:True, [a]*2), a)
    def test_ifilterfalse(self):
        a = []
        self.makecycle(ifilterfalse(lambda x:False, a), a)
    def test_izip(self):
        a = []
        self.makecycle(izip([a]*2, [a]*3), a)
    def test_izip_longest(self):
        a = []
        self.makecycle(izip_longest([a]*2, [a]*3), a)
        b = [a, None]
        self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
    def test_imap(self):
        a = []
        self.makecycle(imap(lambda x:x, [a]*2), a)
    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)
    def test_permutations(self):
        a = []
        self.makecycle(permutations([1,2,a,3], 3), a)
    def test_product(self):
        a = []
        self.makecycle(product([1,2,a,3], repeat=3), a)
    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)
    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
    """Plain pass-through generator: yields each element of seqn in order."""
    for item in seqn:
        yield item
class G:
    'Sequence using __getitem__'
    # Old-style sequence protocol only: iterable via __getitem__ from index 0,
    # with no __iter__ defined.
    def __init__(self, seqn):
        self.seqn = seqn
    def __getitem__(self, i):
        return self.seqn[i]
class I:
    'Sequence using iterator protocol'
    # Full Python 2 iterator protocol: __iter__ returns self, next() walks
    # the wrapped sequence by index and raises StopIteration at the end.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        return self
    def next(self):
        if self.i >= len(self.seqn): raise StopIteration
        v = self.seqn[self.i]
        self.i += 1
        return v
class Ig:
    'Sequence using iterator protocol defined with a generator'
    # __iter__ is a generator function, so each iter() call gets a fresh pass.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        for val in self.seqn:
            yield val
class X:
    'Missing __getitem__ and __iter__'
    # Deliberately NOT iterable: has next() but no __iter__/__getitem__,
    # so passing an X to an itertools constructor must raise TypeError.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def next(self):
        if self.i >= len(self.seqn): raise StopIteration
        v = self.seqn[self.i]
        self.i += 1
        return v
class N:
    'Iterator missing next()'
    # __iter__ returns self but there is no next(), so consuming the
    # resulting iterator must raise TypeError.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        return self
class E:
    'Test propagation of exceptions'
    # next() divides by zero, so consumers must surface ZeroDivisionError.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        return self
    def next(self):
        3 // 0
class S:
    'Test immediate stop'
    # An iterator that is empty from the start: next() always raises
    # StopIteration.
    def __init__(self, seqn):
        pass
    def __iter__(self):
        return self
    def next(self):
        raise StopIteration
def L(seqn):
    'Test multiple tiers of iterators'
    # Stacks several iterator wrappers (chain -> imap -> R -> Ig -> G) to
    # exercise deeply layered iteration over the same data.
    return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
    """Run each itertools function against every flavor of input defined
    above: well-behaved iterables (G, I, Ig, S, L, R), a non-iterable (X),
    an iterator missing next() (N), and an exception-raising iterator (E).
    Good inputs must agree with the builtin equivalents; bad inputs must
    raise TypeError or propagate ZeroDivisionError."""
    def test_chain(self):
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(chain(g(s))), list(g(s)))
                self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
            self.assertRaises(TypeError, list, chain(X(s)))
            self.assertRaises(TypeError, list, chain(N(s)))
            self.assertRaises(ZeroDivisionError, list, chain(E(s)))
    def test_product(self):
        # product() materializes its arguments eagerly, so bad inputs fail
        # at construction time rather than during iteration
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            self.assertRaises(TypeError, product, X(s))
            self.assertRaises(TypeError, product, N(s))
            self.assertRaises(ZeroDivisionError, product, E(s))
    def test_cycle(self):
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                tgtlen = len(s) * 3
                expected = list(g(s))*3
                actual = list(islice(cycle(g(s)), tgtlen))
                self.assertEqual(actual, expected)
            self.assertRaises(TypeError, cycle, X(s))
            self.assertRaises(TypeError, list, cycle(N(s)))
            self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
    def test_groupby(self):
        for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                # default key: each distinct element is its own group
                self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
            self.assertRaises(TypeError, groupby, X(s))
            self.assertRaises(TypeError, list, groupby(N(s)))
            self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
    def test_ifilter(self):
        for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
            self.assertRaises(TypeError, ifilter, isEven, X(s))
            self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
    def test_ifilterfalse(self):
        for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
            self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
            self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
    def test_izip(self):
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(izip(g(s))), zip(g(s)))
                self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
            self.assertRaises(TypeError, izip, X(s))
            self.assertRaises(TypeError, list, izip(N(s)))
            self.assertRaises(ZeroDivisionError, list, izip(E(s)))
    def test_iziplongest(self):
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
                self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
            self.assertRaises(TypeError, izip_longest, X(s))
            self.assertRaises(TypeError, list, izip_longest(N(s)))
            self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
    def test_imap(self):
        for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
                self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
            self.assertRaises(TypeError, imap, onearg, X(s))
            self.assertRaises(TypeError, list, imap(onearg, N(s)))
            self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
    def test_islice(self):
        for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
            self.assertRaises(TypeError, islice, X(s), 10)
            self.assertRaises(TypeError, list, islice(N(s), 10))
            self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
    def test_starmap(self):
        for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                ss = zip(s, s)
                self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
            self.assertRaises(TypeError, starmap, operator.pow, X(ss))
            self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
            self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
    def test_takewhile(self):
        for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                # expected: leading run of even elements
                tgt = []
                for elem in g(s):
                    if not isEven(elem): break
                    tgt.append(elem)
                self.assertEqual(list(takewhile(isEven, g(s))), tgt)
            self.assertRaises(TypeError, takewhile, isEven, X(s))
            self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
            self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
    def test_dropwhile(self):
        for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                # expected: everything after the leading run of odd elements
                tgt = []
                for elem in g(s):
                    if not tgt and isOdd(elem): continue
                    tgt.append(elem)
                self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
            self.assertRaises(TypeError, dropwhile, isOdd, X(s))
            self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
            self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
    def test_tee(self):
        for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                it1, it2 = tee(g(s))
                self.assertEqual(list(it1), list(g(s)))
                self.assertEqual(list(it2), list(g(s)))
            self.assertRaises(TypeError, tee, X(s))
            self.assertRaises(TypeError, list, tee(N(s))[0])
            self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
    """repeat() with a count exposes __length_hint__/len support;
    unbounded repeat() does not."""
    def test_repeat(self):
        # len here is the test helper from test_iterlen, not the builtin
        from test.test_iterlen import len
        self.assertEqual(len(repeat(None, 50)), 50)
        self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
    """Regression tests for previously reported itertools bugs."""
    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc:
        # izip must tolerate its argument tuple being mutated mid-iteration.
        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                if first:
                    del first[:]
                    f(z.next())
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = imap(g, items)
            z = izip(*[gen]*len(tuple1))
            z.next()
        def f(t):
            global T
            T = t
            first[:] = list(T)
        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        # snapshot taken during mutation must equal the tuple's final state
        self.assertEqual(first, second)
    def test_sf_950057(self):
        # Make sure that chain() and cycle() catch exceptions immediately
        # rather than when shifting between input sources
        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)   # unreachable on purpose: nothing after the raise
        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)
            if x:
                raise StopIteration
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        self.assertEqual(hist, [0,1])   # gen2 was never entered
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])
        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses of itertools types must be constructible with keyword
    arguments in __init__ without the base type rejecting them outright."""
    def test_keywords_in_subclass(self):
        # count is not subclassable...
        for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
                    starmap, islice, takewhile, dropwhile, cycle):
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError, err:
                # we expect type errors because of wrong argument count,
                # not a blanket "does not take keyword arguments" rejection
                self.failIf("does not take keyword arguments" in err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda (i,x):i-x):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> def nth(iterable, n):
... "Returns the nth item or empty list"
... return list(islice(iterable, n, n+1))
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
...     "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset('ab') --> set([]), set(['a']), set(['b']), set(['a', 'b'])"
... # Recipe credited to Eric Raymond
... pairs = [(2**i, x) for i, x in enumerate(iterable)]
... for n in xrange(2**len(pairs)):
... yield set(x for m, x in pairs if m&n)
>>> def compress(data, selectors):
... "compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F"
... return (d for d, s in izip(data, selectors) if s)
>>> def combinations_with_replacement(iterable, r):
... "combinations_with_replacement('ABC', 3) --> AA AB AC BB BC CC"
... pool = tuple(iterable)
... n = len(pool)
... indices = [0] * r
... yield tuple(pool[i] for i in indices)
... while 1:
... for i in reversed(range(r)):
... if indices[i] != n - 1:
... break
... else:
... return
... indices[i:] = [indices[i] + 1] * (r - i)
... yield tuple(pool[i] for i in indices)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
['d']
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> map(sorted, powerset('ab'))
[[], ['a'], ['b'], ['a', 'b']]
>>> list(compress('abcdef', [1,0,1,0,1,1]))
['a', 'c', 'e', 'f']
>>> list(combinations_with_replacement('abc', 2))
[('a', 'a'), ('a', 'b'), ('a', 'c'), ('b', 'b'), ('b', 'c'), ('c', 'c')]
"""
# Expose the library-reference doctests (defined in `libreftest` above) to
# test_support.run_doctest via the standard __test__ hook.
__test__ = {'libreftest' : libreftest}

def test_main(verbose=None):
    """Run all itertools test cases; with verbose on a debug build, also
    repeat the suite to look for reference-count leaks, then run the
    library-reference doctests."""
    test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
                    RegressionTests, LengthTransparency,
                    SubclassWithKwargsTest, TestExamples)
    test_support.run_unittest(*test_classes)

    # verify reference counting
    # (sys.gettotalrefcount only exists on --with-pydebug builds; a stable
    # count across repeated runs means no leak)
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts

    # doctest the examples in the library reference
    test_support.run_doctest(sys.modules[__name__], verbose)

if __name__ == "__main__":
    test_main(verbose=True)
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.6/Lib/test/test_itertools.py
|
Python
|
mit
| 52,962
|
[
"GULP"
] |
83ac248e5ca0bc6fd05f497cba63d7d6e8aa5e3a14da72d51031e735feaf4020
|
#!/usr/bin/env python
#29-11-2016
#Authors:Sebastian ECHEVERRI RESTREPO,
# sebastian.echeverri.restrepo@skf.com, sebastianecheverrir@gmail.com
# James EWEN
# j.ewen14@imperial.ac.uk, jimmyewen@gmail.com
#################################################################################
# This file adds the information needed to define the EAM interaction
# for the Fe atoms in the LAMMPS input files
#################################################################################
import math
def AddEAM():
    """Augment the LAMMPS LOPLS input files with an EAM description for Fe.

    Reads 'lopls.in.settings', collects the existing like-type
    lj/cut/coul/long pair coefficients, generates the missing cross-type
    coefficients with geometric combination rules
    (eps_ij = sqrt(eps_i*eps_j), sigma_ij = sqrt(sigma_i*sigma_j)),
    and rewrites the file so that the last atom type (Fe) is handled by the
    eam/fs pair style (potential file Fe_mm.eam.fs) instead of LJ.
    Also rewrites 'lopls.in.init' so the hybrid pair_style line includes
    eam/fs.

    Side effects: overwrites 'lopls.in.settings' and 'lopls.in.init' in the
    current working directory. Raises IOError/OSError if they are missing.

    Fixes vs. original: all file handles are closed deterministically via
    context managers (the original never closed the final 'lopls.in.init'
    write handle, risking unflushed output); unused loop counter removed.
    """
    class PairCoeff:
        # Simple record: a pair of atom types plus their LJ parameters.
        def __init__(self, Type1, Type2, Epsilon, Sigma):
            self.Type1 = Type1
            self.Type2 = Type2
            self.Epsilon = Epsilon
            self.Sigma = Sigma

    # ---- read the settings file, separating pair_coeff lines from the rest
    PairCoeffs = []   # parsed like-type coefficients, then appended cross terms
    OtherCoeffs = []  # every non-pair_coeff line, preserved verbatim
    with open('lopls.in.settings', "r") as f:
        for line in f:
            if line.find("pair_coeff") != -1:
                fields = line.split()
                # expected layout: pair_coeff <t1> <t2> <style> <eps> <sigma>
                PairCoeffs.append(PairCoeff(int(fields[1]), int(fields[2]),
                                            float(fields[4]), float(fields[5])))
            else:
                OtherCoeffs.append(line)

    # ---- geometric combination rules for dissimilar atom types
    Ntypes = len(PairCoeffs)
    for i in range(Ntypes):
        for j in range(i + 1, Ntypes):
            Epsilon = math.sqrt(PairCoeffs[i].Epsilon * PairCoeffs[j].Epsilon)
            Sigma = math.sqrt(PairCoeffs[i].Sigma * PairCoeffs[j].Sigma)
            PairCoeffs.append(PairCoeff(i + 1, j + 1, Epsilon, Sigma))

    # ---- rewrite the settings file: EAM for the last type, LJ for the rest
    with open('lopls.in.settings', 'w') as f:
        # map every type except the last to NULL; the last type is Fe (EAM)
        f.write("pair_coeff * * eam/fs Fe_mm.eam.fs ")
        f.write("NULL " * (Ntypes - 1))
        f.write("Fe \n")
        for pc in PairCoeffs:
            # skip the Fe-Fe pair (last type): covered by eam/fs above
            if (pc.Type1 != Ntypes) or (pc.Type2 != Ntypes):
                f.write("pair_coeff " + str(pc.Type1) + " " + str(pc.Type2)
                        + " lj/cut/coul/long " + str(pc.Epsilon) + " "
                        + str(pc.Sigma) + "\n")
        for other in OtherCoeffs:
            f.write(other)

    # ---- add eam/fs to the hybrid pair_style line of the init file
    with open('lopls.in.init', "r") as f:
        init_lines = f.readlines()
    with open('lopls.in.init', "w") as f:
        for line in init_lines:
            if line.find("pair_style") != -1:
                f.write("pair_style hybrid lj/cut/coul/long 10.0 10.0 eam/fs \n")
            else:
                f.write(line)
|
JE1314/LAMMPS_builder
|
root/AddEAM.py
|
Python
|
gpl-3.0
| 2,700
|
[
"LAMMPS"
] |
81085b8db0b22d5c8cc31b65f78abbbb07281477aa7182604e3483942a4bfdb6
|
#!/usr/bin/python
# File: Psycollider.py
# Copyright (c) Benjamin Golinvaux
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
#
# Originally started by
# Benjamin Golinvaux
# benjamin.golinvaux@euresys.com
# messenger: bgolinvaux@hotmail.com
#
# currently maintained by:
# Christopher Frauenberger - frauenberger@iem.at
# John Glover - glover.john@gmail.com
# Martin Victory - martin.victory@gmail.com
#
# Latest changes:
# Nov 2009 (Martin)
# - Help file navigation
# - Find and Replace dialogs
# - Text Formatting
# - About dialog box
# Jan 2008 (John)
# - SDI model
# - Remembers the size and position of the post window
# - Allows you to save the contents of the post window (file > save/save as)
# - Automatically stops sc server and swing osc when closing the main (post) window
# - Allows you to change the default window size
# - Double clicking on brackets selects the block of text they surround (update by Christopher)
# - Like the GEdit SC plugin (linux), you can execute a block of text surrounded by round brackets
# by placing the cursor at the opening bracket and pressing evaluate (ctrl + enter).
# This only happens if the open bracket is the first character on the line (not including white space)
# - Disabled word wrap in the text editor
# - Can toggle displaying of line numbers on/off in code editor
# (effects all code windows and is saved to config)
# - added ability to clear the recent file list (file history)
# - added the option to set the tab size in code windows (saved to config)
#
# ---------------------------------------------------------------------
import PySCLang
import wx
import wx.stc as stc
import wx.html as html
import wx.richtext as richtext
import os, string, keyword, sys, time
# Platform-specific editor fonts and help-folder name.
if wx.Platform == '__WXMSW__':
    faces = { 'times': 'Times New Roman', 'mono' : 'Courier New', 'helv' : 'Arial', 'other': 'Comic Sans MS', 'size' : 10, 'size2': 8, }
    gAppHelpFolder = 'help_windows'
else:
    faces = { 'times': 'Times', 'mono' : 'Courier', 'helv' : 'Helvetica', 'other': 'new century schoolbook', 'size' : 10, 'size2': 8, }
    gAppHelpFolder = 'Help-windows'
gHelpFolder = 'Help'
# NOTE(review): the Windows-style "\\" separator suggests this path is only
# meant for Windows -- confirm for other platforms.
gUserExtensionFolder = os.path.join(os.path.expanduser("~"), "SuperCollider\\Extensions")
MAX_HISTORY_FILES = 9    # cap on recent-files entries (wx.ID_FILE1..ID_FILE9)
DEFAULT_SIZEX = 500      # fallback window width when no config value exists
DEFAULT_SIZEY = 300      # fallback window height
DEFAULT_POSX = 100       # default window x position
DEFAULT_POSY = 100       # default window y position
#----------------------------------------------------------------------
# set SC3_KEYWORDS as a global variable.
# Loaded from "keywords.list" (whitespace-separated words); used later to
# configure the editor's syntax highlighting.
try:
    file = open("keywords.list","r")
    SC3_KEYWORDS = string.split( file.read() )
    file.close()
except IOError:
    # Definition file missing: fall back to a minimal keyword set and warn.
    SC3_KEYWORDS = [ "var", "arg", "Server" ]
    print "warning:"
    print "SC3-keywords definition file \"keywords.list\" was not found."
    print "so now, these following words are the KEYWORDS for the meantime."
    print SC3_KEYWORDS
SC3_KEYWORDS.sort()
# ---------------------------------------------------------------------
# PsycolliderWindow
#
# Base class for all windows
# - creates the default menus
# - asks to save a modified file when closing
# - adds file history
# - holds an ID number for each window, for PsycolliderDocument to refer to self
nextPsycolliderWindowId = 1  # module-level counter: next window ID to hand out
class PsycolliderWindow(wx.Frame):
    """Base frame shared by every Psycollider window.

    Responsibilities visible in this class:
    - builds the common File/Edit/Lang/Options/Help menu bar (CreateMenuBar)
    - restores the default window size from the app's wx.FileConfig
    - prompts to save modified contents before closing (CanCloseWindow)
    - delegates most menu actions to the application object (wx.GetApp())

    Subclasses override the edit-menu handlers, SaveFile/SaveFileAs,
    GetSelectedTextOrLine and LineDown as needed.
    """
    config = None        # wx.FileConfig object
    menubar = None       # wx.MenuBar object
    fileMenu = None      # file menu (wx.Menu object)
    editMenu = None      # edit menu (wx.Menu)
    langMenu = None      # lang menu (wx.Menu)
    optionsMenu = None   # options menu (wx.Menu)
    helpMenu = None      # help menu (wx.Menu)
    title = ""           # the window title
    isModified = False   # whether or not window contents have been modified
    filePath = ""        # path to file being displayed
    windowId = -99       # per-window ID (see NOTE in __init__)

    def __init__(self, parent, id, title="", winStyle=wx.DEFAULT_FRAME_STYLE):
        self.title = title
        self.config = wx.GetApp().config
        wx.Frame.__init__(self, parent, id, title, style=winStyle)
        global nextPsycolliderWindowId
        # NOTE(review): this assigns a *local* windowId, so self.windowId
        # keeps the class default (-99) -- looks unintended; left untouched.
        windowId = nextPsycolliderWindowId
        nextPsycolliderWindowId = nextPsycolliderWindowId + 1
        #sys.stdout.write("windowId in pythonland is ")
        #sys.stdout.write(str(windowId))
        #sys.stdout.write("\n")
        self.config.SetPath("/WindowSettings")
        sizeX = self.config.ReadInt('DefaultSizeX', DEFAULT_SIZEX)
        sizeY = self.config.ReadInt('DefaultSizeY', DEFAULT_SIZEY)
        self.SetSize(wx.Size(sizeX, sizeY))
        self.CreateMenuBar()
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

    # file menu actions
    def OnCloseWindow(self, event):
        # NOTE(review): when CanCloseWindow() is False the event is vetoed,
        # but execution still falls through and destroys the window; confirm
        # whether an early return was intended after Veto().
        if not self.CanCloseWindow():
            event.Veto()
        wx.GetApp().fileHistory.RemoveMenu(self.openRecentMenu)
        wx.GetApp().ClosedWindow(self)
        self.Destroy()

    def OnNewCodeWin(self, event):
        wx.GetApp().NewCodeWindow()

    def OnHtmlToCode(self, event):
        wx.GetApp().HtmlToCode(self)

    def OnOpenFile(self, event):
        wx.GetApp().OpenFile()

    def OnSaveFile(self, event):
        self.SaveFile()

    def OnSaveFileAs(self, event):
        self.SaveFileAs()

    # edit menu actions - need to be overriden by inheriting class
    def OnUndo(self, event):
        pass

    def OnRedo(self, event):
        pass

    def OnCut(self, event):
        pass

    def OnCopy(self, event):
        pass

    def OnPaste(self, event):
        pass

    def OnDelete(self, event):
        pass

    def OnSelectAll(self, event):
        pass

    def OnShowFind(self, event):
        # Modeless find dialog; search events arrive via the EVT_FIND*
        # bindings made in CreateMenuBar and land in OnFind.
        findData = wx.FindReplaceData()
        findDialog = wx.FindReplaceDialog(self, findData, "Find")
        findDialog.findData = findData
        findDialog.Show(True)

    def OnShowFindReplace(self, event):
        findReplaceData = wx.FindReplaceData()
        findReplaceDialog = wx.FindReplaceDialog(self, findReplaceData, "Find & Replace", wx.FR_REPLACEDIALOG)
        findReplaceDialog.findReplaceData = findReplaceData
        findReplaceDialog.Show(True)

    def OnFindClose(self, event):
        event.GetDialog().Destroy()

    def OnFind(self, event):
        pass

    # lang menu actions
    def OnStopServer(self, event):
        wx.GetApp().StopServer()

    def OnRun(self, event):
        wx.GetApp().Run()

    def OnStop(self, event):
        wx.GetApp().Stop()

    def OnCompileLibrary(self, event):
        wx.GetApp().CompileLibrary()

    def OnOpenClassDef(self, event):
        self.OpenClassDef()

    def OnImpOf(self, event):
        self.ImpOf()

    def OnRefsTo(self, event):
        self.RefsTo()

    def OnEval(self, event):
        self.Eval()

    def OnClearPostWindow(self, event):
        wx.GetApp().ClearPostWindow()

    #help menu actions
    def OnScHelp(self, event):
        wx.GetApp().GoToHelpFile(self.GetSelectedTextOrLine())

    def OnBrowseHelp(self, event):
        wx.GetApp().Eval("Help.gui")

    def OnBrowseClasses(self, event):
        wx.GetApp().Eval("Help.browse")

    def OnAbout(self, event):
        # Standard wx About box with the SuperCollider icon and metadata.
        description = 'a programming language and engine for real time audio synthesis.'
        info = wx.AboutDialogInfo()
        info.SetIcon(wx.Icon('Help/GUI/Cocoa-GUI/SCImage/icon.supercollider.png', wx.BITMAP_TYPE_PNG))
        info.SetName('SuperCollider')
        info.SetVersion('3.3.2')
        info.SetDescription(description)
        info.SetWebSite('http://supercollider.sourceforge.net')
        wx.AboutBox(info)

    #options menu actions
    def OnSetDefaultWindowSize(self, event):
        size = self.GetSize()
        wx.GetApp().SetDefaultWindowSize(size.x, size.y)

    def OnClearRecentFileList(self, event):
        wx.GetApp().ClearRecentFileList()

    # should be overwritten by inheriting classes
    def SaveFile(self):
        wx.MessageBox("Error: Saving not implemented for this type of window")

    # should be overwritten by inheriting classes
    def SaveFileAs(self):
        wx.MessageBox("Error: Saving not implemented for this type of window")

    def OpenClassDef(self):
        wx.GetApp().OpenClassDef(self.GetSelectedTextOrLine())

    def ImpOf(self):
        wx.GetApp().ImpOf(self.GetSelectedTextOrLine())

    def RefsTo(self):
        wx.GetApp().RefsTo(self.GetSelectedTextOrLine())

    def Eval(self):
        # Evaluate the current selection (or line) and advance the caret.
        wx.GetApp().Eval(self.GetSelectedTextOrLine())
        self.LineDown()

    # needs to be overwritten by inheriting classes
    def LineDown(self):
        pass

    # should be overwritten by inheriting classes
    def GetSelectedTextOrLine(self):
        return ""

    def CanCloseWindow(self):
        # Returns True when the window may close; False only when the user
        # cancels the save prompt. Unsaved windows get a Save As prompt.
        if self.isModified:
            if self.filePath == "":
                dlg = wx.MessageDialog(self,"Do you want to save %s ? " % self.title,"SuperCollider",wx.CANCEL | wx.YES_NO)
                reply = dlg.ShowModal()
                if reply == wx.ID_YES:
                    self.SaveFileAs()
                    return True
                elif reply == wx.ID_NO:
                    return True
                elif reply == wx.ID_CANCEL:
                    return False
            else:
                dlg = wx.MessageDialog(self,"Do you want to save %s ?" % self.filePath,"SuperCollider",wx.CANCEL | wx.YES_NO)
                reply = dlg.ShowModal()
                if reply == wx.ID_YES:
                    self.SaveFile()
                    return True
                elif reply == wx.ID_NO:
                    return True
                elif reply == wx.ID_CANCEL:
                    return False
        else:
            return True

    def CreateMenuBar(self):
        # Build the five shared menus, populate them, and bind all handlers.
        self.fileMenu = wx.Menu()
        self.openRecentMenu = wx.Menu()
        self.editMenu = wx.Menu()
        self.langMenu = wx.Menu()
        self.optionsMenu = wx.Menu()
        self.helpMenu = wx.Menu()
        self.menubar = wx.MenuBar()
        self.menubar.Append(self.fileMenu, "&File")
        self.menubar.Append(self.editMenu, "&Edit")
        self.menubar.Append(self.langMenu, "&Lang")
        self.menubar.Append(self.optionsMenu, "&Options")
        self.menubar.Append(self.helpMenu, "&Help")
        self.SetMenuBar(self.menubar)
        # menu items
        self.newCodeWin = wx.MenuItem(self.fileMenu, -1, '&New\tCtrl+N')
        self.htmlToCode = wx.MenuItem(self.fileMenu, -1, 'H&TML to Code\tCtrl+T')
        self.openFile = wx.MenuItem(self.fileMenu, -1, '&Open...\tCtrl+O')
        self.saveFile = wx.MenuItem(self.fileMenu, -1, '&Save\tCtrl+S')
        self.saveFileAs = wx.MenuItem(self.fileMenu, -1, 'Save &As...\tCtrl+Shift+S')
        self.closeWindow = wx.MenuItem(self.fileMenu, -1, 'Close\tCtrl+W')
        self.undo = wx.MenuItem(self.editMenu, -1, '&Undo\tCtrl+Z')
        self.redo = wx.MenuItem(self.editMenu, -1, '&Redo\tCtrl+Y')
        self.cut = wx.MenuItem(self.editMenu, -1, '&Cut\tCtrl+X')
        self.copy = wx.MenuItem(self.editMenu, -1, 'C&opy\tCtrl+C')
        self.paste = wx.MenuItem(self.editMenu, -1, '&Paste\tCtrl+V')
        self.delete = wx.MenuItem(self.editMenu, -1, '&Delete\tDel')
        self.selectAll = wx.MenuItem(self.editMenu, -1, '&Select All\tCtrl+A')
        self.find = wx.MenuItem(self.editMenu, -1, '&Find\tCtrl+F')
        self.replace = wx.MenuItem(self.editMenu, -1, '&Replace\tCtrl+H')
        self.stopServer = wx.MenuItem(self.langMenu, -1, 'Stop Server')
        self.run = wx.MenuItem(self.langMenu, -1, 'Run\tAlt+R')
        self.stop = wx.MenuItem(self.langMenu, -1, '&Stop\tAlt+.')
        self.compileLibrary = wx.MenuItem(self.langMenu, -1, 'Compile Library\tAlt+K')
        self.openClassDef = wx.MenuItem(self.langMenu, -1, 'Open Class Def\tAlt+J')
        self.impOf = wx.MenuItem(self.langMenu, -1, 'Implementations of\tAlt+Y')
        self.refsTo = wx.MenuItem(self.langMenu, -1, 'References to\tShift+Alt+Y')
        self.eval = wx.MenuItem(self.langMenu, -1, '&Evaluate Selection\tCtrl+Enter')
        self.clearPostWindow = wx.MenuItem(self.langMenu, -1, '&Clear Post Window\tAlt+P')
        self.setDefaultWindowSize = wx.MenuItem(self.optionsMenu, -1, '&Set This Window Size As Default')
        self.clearRecentFileList = wx.MenuItem(self.optionsMenu, -1, '&Clear Recent File List')
        self.scHelp = wx.MenuItem(self.helpMenu, -1, '&SuperCollider Help\tF1')
        self.helpBrowser = wx.MenuItem(self.helpMenu, -1, '&Browse and Search Documentation\t')
        self.classBrowser = wx.MenuItem(self.helpMenu, -1, '&Class Browser\t')
        self.about = wx.MenuItem(self.helpMenu, -1, 'About\t')
        # populate the menus
        self.fileMenu.AppendItem(self.newCodeWin)
        self.fileMenu.AppendItem(self.openFile)
        self.fileMenu.AppendSubMenu(self.openRecentMenu, "Open Recent")
        self.fileMenu.AppendSeparator()
        self.fileMenu.AppendItem(self.saveFile)
        self.fileMenu.AppendItem(self.saveFileAs)
        self.fileMenu.AppendItem(self.htmlToCode)
        self.fileMenu.AppendSeparator()
        self.fileMenu.AppendItem(self.closeWindow)
        self.editMenu.AppendItem(self.undo)
        self.editMenu.AppendItem(self.redo)
        self.editMenu.AppendSeparator()
        self.editMenu.AppendItem(self.cut)
        self.editMenu.AppendItem(self.copy)
        self.editMenu.AppendItem(self.paste)
        self.editMenu.AppendItem(self.delete)
        self.editMenu.AppendItem(self.selectAll)
        self.editMenu.AppendSeparator()
        self.editMenu.AppendItem(self.find)
        self.editMenu.AppendItem(self.replace)
        self.langMenu.AppendItem(self.stopServer)
        self.langMenu.AppendItem(self.run)
        self.langMenu.AppendItem(self.stop)
        self.langMenu.AppendItem(self.compileLibrary)
        self.langMenu.AppendItem(self.openClassDef)
        self.langMenu.AppendItem(self.impOf)
        self.langMenu.AppendItem(self.refsTo)
        self.langMenu.AppendItem(self.eval)
        self.langMenu.AppendItem(self.clearPostWindow)
        self.optionsMenu.AppendItem(self.setDefaultWindowSize)
        self.optionsMenu.AppendItem(self.clearRecentFileList)
        self.helpMenu.AppendItem(self.scHelp)
        self.helpMenu.AppendItem(self.helpBrowser)
        self.helpMenu.AppendItem(self.classBrowser)
        self.helpMenu.AppendSeparator()
        self.helpMenu.AppendItem(self.about)
        # bind menu handlers
        self.Bind(wx.EVT_MENU, self.OnNewCodeWin, id=self.newCodeWin.GetId())
        self.Bind(wx.EVT_MENU, self.OnHtmlToCode, id=self.htmlToCode.GetId())
        self.Bind(wx.EVT_MENU, self.OnOpenFile, id=self.openFile.GetId())
        self.Bind(wx.EVT_MENU, self.OnSaveFile, id=self.saveFile.GetId())
        self.Bind(wx.EVT_MENU, self.OnSaveFileAs, id=self.saveFileAs.GetId())
        self.Bind(wx.EVT_MENU, self.OnCloseWindow, id=self.closeWindow.GetId())
        self.Bind(wx.EVT_MENU, self.OnUndo, id=self.undo.GetId())
        self.Bind(wx.EVT_MENU, self.OnRedo, id=self.redo.GetId())
        self.Bind(wx.EVT_MENU, self.OnCut, id=self.cut.GetId())
        self.Bind(wx.EVT_MENU, self.OnCopy, id=self.copy.GetId())
        self.Bind(wx.EVT_MENU, self.OnPaste, id=self.paste.GetId())
        self.Bind(wx.EVT_MENU, self.OnDelete, id=self.delete.GetId())
        self.Bind(wx.EVT_MENU, self.OnSelectAll, id=self.selectAll.GetId())
        self.Bind(wx.EVT_MENU, self.OnShowFind, id=self.find.GetId())
        self.Bind(wx.EVT_MENU, self.OnShowFindReplace, id=self.replace.GetId())
        self.Bind(wx.EVT_FIND, self.OnFind)
        self.Bind(wx.EVT_FIND_NEXT, self.OnFind)
        self.Bind(wx.EVT_FIND_REPLACE, self.OnFind)
        self.Bind(wx.EVT_FIND_REPLACE_ALL, self.OnFind)
        self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
        self.Bind(wx.EVT_MENU, self.OnStopServer, id=self.stopServer.GetId())
        self.Bind(wx.EVT_MENU, self.OnRun, id=self.run.GetId())
        self.Bind(wx.EVT_MENU, self.OnStop, id=self.stop.GetId())
        self.Bind(wx.EVT_MENU, self.OnCompileLibrary, id=self.compileLibrary.GetId())
        self.Bind(wx.EVT_MENU, self.OnOpenClassDef, id=self.openClassDef.GetId())
        self.Bind(wx.EVT_MENU, self.OnImpOf, id=self.impOf.GetId())
        self.Bind(wx.EVT_MENU, self.OnRefsTo, id=self.refsTo.GetId())
        self.Bind(wx.EVT_MENU, self.OnEval, id=self.eval.GetId())
        self.Bind(wx.EVT_MENU, self.OnClearPostWindow, id=self.clearPostWindow.GetId())
        self.Bind(wx.EVT_MENU, self.OnSetDefaultWindowSize, id=self.setDefaultWindowSize.GetId())
        self.Bind(wx.EVT_MENU, self.OnClearRecentFileList, id=self.clearRecentFileList.GetId())
        self.Bind(wx.EVT_MENU, self.OnScHelp, id=self.scHelp.GetId())
        self.Bind(wx.EVT_MENU, self.OnBrowseHelp, id=self.helpBrowser.GetId())
        self.Bind(wx.EVT_MENU, self.OnBrowseClasses, id=self.classBrowser.GetId())
        self.Bind(wx.EVT_MENU, self.OnAbout, id=self.about.GetId())
        # hook this window's "Open Recent" submenu into the shared history
        wx.GetApp().fileHistory.UseMenu(self.openRecentMenu)
        wx.GetApp().fileHistory.AddFilesToThisMenu(self.openRecentMenu)
        self.Bind(wx.EVT_MENU_RANGE, wx.GetApp().doFileHistory, id=wx.ID_FILE1, id2=wx.ID_FILE9)
# ---------------------------------------------------------------------
# PsycolliderCodeSubWin - plain text window for code
class PsycolliderCodeSubWin(wx.stc.StyledTextCtrl):
    """Styled-text control used as the editing area of a code window.

    Configures the Scintilla C++ lexer with SuperCollider keywords, code
    folding on margin 2, brace matching/highlighting, and forwards change
    events to the parent window. The fold helpers (FoldAll/Expand) follow
    the standard wxPython StyledTextCtrl demo.
    """
    fold_symbols = 3

    def __init__ (self,parent):
        stc.StyledTextCtrl.__init__(self,parent)
        # only report genuine user edits (insert/delete), not style changes
        self.SetModEventMask(wx.stc.STC_MOD_INSERTTEXT | wx.stc.STC_MOD_DELETETEXT | wx.stc.STC_PERFORMED_USER)
        # bindings
        self.Bind(stc.EVT_STC_CHANGE, self.OnStcChange)
        self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI)
        self.Bind(stc.EVT_STC_MARGINCLICK, self.OnMarginClick)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed)
        self.Bind(wx.EVT_CHAR, self.OnChar)  # this hack is to enable the alt+. shortcut
                                             # to stop playback, which doesn't seem to work otherwise
                                             # bug in wx?
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
        self.SetLexer(wx.stc.STC_LEX_CPP)  # yssr
        self.SetKeyWords(0, " ".join(SC3_KEYWORDS))  # yssr
        self.SetProperty("fold", "1")
        self.SetProperty("tab.timmy.whinge.level", "1")
        self.SetMargins(1,0)  # yssr
        # set end-of-line character to LF
        self.SetEOLMode(wx.stc.STC_EOL_LF);
        # some settings for appearance
        self.SetViewWhiteSpace(False)
        self.SetViewEOL(False)
        self.SetUseAntiAliasing(True)
        self.SetWrapMode(False)
        self.SetEdgeMode(stc.STC_EDGE_NONE)
        # Setup a margin to hold fold markers
        self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
        self.SetMarginMask(2, stc.STC_MASK_FOLDERS)
        self.SetMarginSensitive(2, True)
        self.SetMarginWidth(2, 12)
        # Like a flattened tree control using square headers
        self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
        self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "#808080")

    def OnChar(self, event):
        # 0x2e == '.'; Alt+. stops playback (see the binding note above)
        if event.GetKeyCode() == 0x2e and event.AltDown():
            self.GetParent().OnStop(None)
        else:
            event.Skip()

    def OnStcChange(self, event):
        # let the owning window mark itself as modified
        self.GetParent().OnStcChange(event)

    def OnKeyPressed(self, event):
        # Ctrl+Space: show a calltip (with Shift) or keyword autocompletion.
        #if self.CallTipActive():
        #    self.CallTipCancel()
        key = event.GetKeyCode()
        if key == 32 and event.ControlDown():
            pos = self.GetCurrentPos()
            # Tips
            if event.ShiftDown():
                self.CallTipSetBackground("yellow")
                self.CallTipShow(pos, 'we can display tips here')
            # Code completion
            else:
                #lst = []
                #for x in range(50000):
                #    lst.append('%05d' % x)
                #st = " ".join(lst)
                #print len(st)
                #self.AutoCompShow(0, st)
                kw = keyword.kwlist[:]
                #kw.append("zzzzzz?2")
                kw.sort()  # Python sorts are case sensitive
                self.AutoCompSetIgnoreCase(False)  # so this needs to match
                # Images are specified with a appended "?type"
                for i in range(len(kw)):
                    if kw[i] in keyword.kwlist:
                        kw[i] = kw[i] + "?1"
                self.AutoCompShow(0, " ".join(kw))
        else:
            event.Skip()

    def OnDoubleClick(self, evt):
        # Double-clicking on a bracket selects the text the pair surrounds.
        braceAtCaret, braceOpposite = self.CheckForMatchingBraces()
        if braceAtCaret != -1 and braceOpposite != -1:
            if braceAtCaret < braceOpposite:
                self.SetSelection(braceAtCaret+1, braceOpposite)
            else:
                self.SetSelection(braceOpposite+1, braceAtCaret)
        else:
            evt.Skip()

    def OnUpdateUI(self, evt):
        # Highlight the matching brace pair, or flag an unmatched brace.
        braceAtCaret, braceOpposite = self.CheckForMatchingBraces()
        if braceAtCaret != -1 and braceOpposite == -1:
            self.BraceBadLight(braceAtCaret)
        else:
            self.BraceHighlight(braceAtCaret, braceOpposite)

    def OnMarginClick(self, evt):
        # fold and unfold as needed
        if evt.GetMargin() == 2:
            if evt.GetShift() and evt.GetControl():
                self.FoldAll()
            else:
                lineClicked = self.LineFromPosition(evt.GetPosition())
                if self.GetFoldLevel(lineClicked) & stc.STC_FOLDLEVELHEADERFLAG:
                    if evt.GetShift():
                        self.SetFoldExpanded(lineClicked, True)
                        self.Expand(lineClicked, True, True, 1)
                    elif evt.GetControl():
                        if self.GetFoldExpanded(lineClicked):
                            self.SetFoldExpanded(lineClicked, False)
                            self.Expand(lineClicked, False, True, 0)
                        else:
                            self.SetFoldExpanded(lineClicked, True)
                            self.Expand(lineClicked, True, True, 100)
                    else:
                        self.ToggleFold(lineClicked)

    def CheckForMatchingBraces(self):
        # Returns (braceAtCaret, braceOpposite); -1 means "not found".
        # A brace only counts when styled as an operator by the lexer
        # (i.e. not inside a string or comment).
        braceAtCaret = -1
        braceOpposite = -1
        charAfter = None
        charBefore = None
        caretPos = self.GetCurrentPos()
        if caretPos > 0:
            charBefore = self.GetCharAt(caretPos - 1)
            styleBefore = self.GetStyleAt(caretPos - 1)
        # check before
        if charBefore and chr(charBefore) in "[]{}()" and styleBefore == stc.STC_C_OPERATOR:
            braceAtCaret = caretPos - 1
        # check after
        if braceAtCaret < 0:
            charAfter = self.GetCharAt(caretPos)
            styleAfter = self.GetStyleAt(caretPos)
            if charAfter and chr(charAfter) in "[]{}()" and styleAfter == stc.STC_C_OPERATOR:
                braceAtCaret = caretPos
        if braceAtCaret >= 0:
            braceOpposite = self.BraceMatch(braceAtCaret)
        return braceAtCaret, braceOpposite

    def FoldAll(self):
        # Toggle folding of every top-level fold header in the document.
        lineCount = self.GetLineCount()
        expanding = True
        # find out if we are folding or unfolding
        for lineNum in range(lineCount):
            if self.GetFoldLevel(lineNum) & stc.STC_FOLDLEVELHEADERFLAG:
                expanding = not self.GetFoldExpanded(lineNum)
                break;
        lineNum = 0
        while lineNum < lineCount:
            level = self.GetFoldLevel(lineNum)
            if level & stc.STC_FOLDLEVELHEADERFLAG and \
               (level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE:
                if expanding:
                    self.SetFoldExpanded(lineNum, True)
                    lineNum = self.Expand(lineNum, True)
                    lineNum = lineNum - 1
                else:
                    lastChild = self.GetLastChild(lineNum, -1)
                    self.SetFoldExpanded(lineNum, False)
                    if lastChild > lineNum:
                        self.HideLines(lineNum+1, lastChild)
            lineNum = lineNum + 1

    def Expand(self, line, doExpand, force=False, visLevels=0, level=-1):
        # Recursively show/hide the lines of a fold section; returns the
        # first line after the processed section.
        lastChild = self.GetLastChild(line, level)
        line = line + 1
        while line <= lastChild:
            if force:
                if visLevels > 0:
                    self.ShowLines(line, line)
                else:
                    self.HideLines(line, line)
            else:
                if doExpand:
                    self.ShowLines(line, line)
            if level == -1:
                level = self.GetFoldLevel(line)
            if level & stc.STC_FOLDLEVELHEADERFLAG:
                if force:
                    if visLevels > 1:
                        self.SetFoldExpanded(line, True)
                    else:
                        self.SetFoldExpanded(line, False)
                    line = self.Expand(line, doExpand, force, visLevels-1)
                else:
                    if doExpand and self.GetFoldExpanded(line):
                        line = self.Expand(line, True, force, visLevels-1)
                    else:
                        line = self.Expand(line, False, force, visLevels-1)
            else:
                line = line + 1;
        return line

    def SetShowLineNumbers(self, value):
        # Margin 2 doubles as either a line-number or fold-symbol margin.
        if value:
            self.SetMarginType(2, stc.STC_MARGIN_NUMBER)
        else:
            self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)

    def GetTabSize(self):
        return self.GetTabWidth()

    def SetTabSize(self, tabSize):
        self.SetTabWidth(tabSize)

    def GetText(self):
        # append an extra space, as GetTextUTF8() seems to remove the last character, wx bug?
        self.AppendTextUTF8(" ")
        return self.GetTextUTF8()
# ---------------------------------------------------------------------
# Code Window
# accommodates the code sub window
class PsycolliderCodeWin(PsycolliderWindow):
SHOW_LINE_NUMBERS = False # default
TAB_SIZE = 8 # default tab size
def __init__ (self, parent, id, title, pos=wx.DefaultPosition, size=wx.DefaultSize):
PsycolliderWindow.__init__(self, parent, id, title)
self.fileMenu.Remove(self.htmlToCode.GetId()) # Remove unnecessary menu item
self.codeSubWin = PsycolliderCodeSubWin(self)
# this will be our default font
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, "Courier New")
self.fontSize = font.GetPointSize()
self.fontFace = font.GetFaceName()
self.ChangeFont(self.fontFace, self.fontSize)
# line numbers
self.config.SetPath("/CodeWindowOptions")
self.showLineNumbers.Check(self.config.ReadInt('ShowLineNumbers', self.SHOW_LINE_NUMBERS))
self.codeSubWin.SetShowLineNumbers(self.showLineNumbers.IsChecked())
# tab size
self.codeSubWin.SetTabSize(self.config.ReadInt('TabSize', self.TAB_SIZE))
def OnStcChange(self, event):
if not self.isModified:
self.SetTitle(self.GetTitle() + "*")
self.isModified = True
def OnShowLineNumbers(self, event):
for window in wx.GetApp().GetOpenWindows():
if type(window) == PsycolliderCodeWin:
window.SetShowLineNumbers(self.showLineNumbers.IsChecked())
self.config.SetPath("/CodeWindowOptions")
self.config.WriteInt('ShowLineNumbers', self.showLineNumbers.IsChecked())
def OnSetTabSize(self, event):
newTabSize = -1
getNewTabSize = wx.TextEntryDialog(self, 'Set tab size to:', 'Set Tab Size', str(self.TAB_SIZE))
getNewTabSize.SetValue(str(self.codeSubWin.GetTabSize()))
if getNewTabSize.ShowModal() == wx.ID_OK:
try:
newTabSize = int(getNewTabSize.GetValue())
if newTabSize <= 0:
raise
for window in wx.GetApp().GetOpenWindows():
if type(window) == PsycolliderCodeWin:
window.codeSubWin.SetTabSize(newTabSize)
self.config.SetPath("/CodeWindowOptions")
self.config.WriteInt('TabSize', newTabSize)
except:
WriteInLogWindow("Invalid tab size, ignoring. Please enter a positive integer\n")
return
getNewTabSize.Destroy()
def GetSelectedText(self):
return self.codeSubWin.GetSelectedTextUTF8()
def GetCurLineAsString(self):
return self.codeSubWin.GetCurLine()
def SetSubWinFocus(self):
self.codeSubWin.SetFocus()
def SelectRange(self,rangeStart,rangeSize):
self.codeSubWin.SetSelection(rangeStart,rangeStart+rangeSize)
def SetShowLineNumbers(self, value):
self.showLineNumbers.Check(value)
self.codeSubWin.SetShowLineNumbers(value)
def SaveFile(self):
if self.filePath == "":
self.SaveFileAs()
else:
try:
file = open(self.filePath, "w")
content = self.codeSubWin.GetText()
file.write(content)
self.SetTitle(self.filePath)
self.isModified = False
file.close()
except:
# Todo: better error handling? Just print error message for now
wx.MessageBox("Error: Could not save file " + self.filePath)
def SaveFileAs(self):
fileDlg = wx.FileDialog(self,style=wx.SAVE)
if fileDlg.ShowModal() == wx.ID_OK:
self.filePath = fileDlg.GetPath()
try:
file = open(self.filePath ,"w")
content = self.codeSubWin.GetText()
file.write(content)
file.close()
except:
# Todo: better error handling? Just print error message for now
wx.MessageBox("Error: Could not save file " + self.filePath)
return None
self.SetTitle(self.filePath)
self.isModified = False
wx.GetApp().AddFileToHistory(self.filePath)
def GetSelectedTextOrLine(self):
"""Returns selected text if any. If not, returns the current line"""
selection = str(self.codeSubWin.GetSelectedTextUTF8())
if selection == "":
# get current line, return if not at an open '(' bracket
currentLine = self.codeSubWin.GetLineUTF8(self.codeSubWin.GetCurrentLine())
selection = str(currentLine)
# see if the cursor is at a matched bracket
x, y = self.codeSubWin.CheckForMatchingBraces()
if x != -1 and y != -1:
if chr(self.codeSubWin.GetCharAt(x)) == "(":
# make sure the open bracket is the first character in the line
if currentLine.strip().find('(') == 0:
# get all text up to and including the closing bracket
selection = str(self.codeSubWin.GetTextRangeUTF8(x, y+1))
return selection
def LineDown(self):
self.codeSubWin.LineDown()
def OnOpenFonts(self, event):
data = wx.FontData()
data.EnableEffects(False)
dlg = wx.FontDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetFontData()
font = data.GetChosenFont()
self.fontFace = font.GetFaceName()
self.fontSize = font.GetPointSize()
self.ChangeFont(self.fontFace, self.fontSize)
dlg.Destroy()
def ChangeFont(self, face, size):
self.codeSubWin.StyleClearAll() # Reset all to be like the default
self.codeSubWin.StyleSetSpec(stc.STC_STYLE_DEFAULT,
"face:%s, size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_STYLE_LINENUMBER,
"back:#C0C0C0,face:%(helv)s,size:%(size2)d" % faces)
self.codeSubWin.StyleSetSpec(
stc.STC_STYLE_CONTROLCHAR, "face:%s, size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_STYLE_BRACELIGHT,
"fore:#FFFFFF,back:#00FFFF,bold, face:%s, size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_STYLE_BRACEBAD,
"fore:#000000,back:#FF3333,bold,face:%s, size:%d" % (face, size))
# Default face
self.codeSubWin.StyleSetSpec(stc.STC_C_DEFAULT,
"fore:#000000,face:%s,size:%d" % (face, size))
# Comments
self.codeSubWin.StyleSetSpec(stc.STC_C_COMMENTLINE,
"fore:#bf0000,face:%s,size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_C_COMMENT,
"fore:#bf0000,face:%s,size:%d" % (face, size))
# Number
self.codeSubWin.StyleSetSpec(stc.STC_C_NUMBER,
"fore:#333333,face:%s,size:%d" % (face, size))
# String
self.codeSubWin.StyleSetSpec(stc.STC_C_STRING,
"italic,fore:#606060,face:%s,size:%d" % (face, size))
# Single quoted string
self.codeSubWin.StyleSetSpec(stc.STC_C_CHARACTER,
"fore:#007300,face:%s,size:%d" % (face, size))
# Keyword
self.codeSubWin.StyleSetSpec(stc.STC_C_WORD,
"fore:#0000bf,face:%s,size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_C_WORD2,
"fore:#0000bf,face:%s,size:%d" % (face, size))
# Operators
self.codeSubWin.StyleSetSpec(stc.STC_C_OPERATOR,
"face:%s,size:%d" % (face, size))
# Identifiers
self.codeSubWin.StyleSetSpec(stc.STC_C_IDENTIFIER,
"fore:#000000,face:%s,size:%d" % (face, size))
self.codeSubWin.StyleSetSpec(stc.STC_C_UUID,
"fore:#000000,face:%s,size:%d" % (face, size))
# End of line where string is not closed
self.codeSubWin.StyleSetSpec(stc.STC_C_STRINGEOL,
"fore:#000000,face:%s,back:#ffffff,eol,size:%d" % (face, size))
self.codeSubWin.SetCaretForeground("BLACK")
def OnBiggerFont(self, event):
self.fontSize += 1
self.ChangeFont(self.fontFace, self.fontSize)
def OnSmallerFont(self, event):
self.fontSize -= 1
self.ChangeFont(self.fontFace, self.fontSize)
def CreateMenuBar(self):
    """Extend the base window's menu bar with code-editor specific items:
    line-number/tab-size options and a new Format menu for font control.
    Order matters: the base class must build its menus first."""
    PsycolliderWindow.CreateMenuBar(self)
    self.showLineNumbers = wx.MenuItem(self.optionsMenu, -1, 'S&how Line Numbers', kind=wx.ITEM_CHECK)
    self.setTabSize = wx.MenuItem(self.optionsMenu, -1, 'S&et Tab Size')
    self.optionsMenu.AppendSeparator()
    self.optionsMenu.AppendItem(self.showLineNumbers)
    self.optionsMenu.AppendItem(self.setTabSize)
    self.Bind(wx.EVT_MENU, self.OnShowLineNumbers, id=self.showLineNumbers.GetId())
    self.Bind(wx.EVT_MENU, self.OnSetTabSize, id=self.setTabSize.GetId())
    # format menu
    formatMenu = wx.Menu()
    self.menubar.Insert(4, formatMenu, "Fo&rmat")
    self.openFonts = wx.MenuItem(formatMenu, -1, '&Show Fonts\tAlt+T')
    self.biggerFont = wx.MenuItem(formatMenu, -1, '&Bigger Font\tCtrl++')
    self.smallerFont = wx.MenuItem(formatMenu, -1, '&Smaller Font\tCtrl+-')
    formatMenu.AppendItem(self.openFonts)
    formatMenu.AppendSeparator()
    formatMenu.AppendItem(self.biggerFont)
    formatMenu.AppendItem(self.smallerFont)
    self.Bind(wx.EVT_MENU, self.OnOpenFonts, id=self.openFonts.GetId())
    self.Bind(wx.EVT_MENU, self.OnBiggerFont, id=self.biggerFont.GetId())
    self.Bind(wx.EVT_MENU, self.OnSmallerFont, id=self.smallerFont.GetId())
# edit menu actions
def OnUndo(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.Undo()
def OnRedo(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.Redo()
def OnCut(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.Cut()
def OnCopy(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.Copy()
def OnPaste(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.Paste()
def OnDelete(self, event):
    # "Delete" clears the current selection in the Scintilla control.
    self.codeSubWin.Clear()
def OnSelectAll(self, event):
    # Forward the edit action to the Scintilla code control.
    self.codeSubWin.SelectAll()
def OnFind(self, event):
    """Handle all events from the wx find/replace dialog: find, find-next,
    replace and replace-all, mapping wx search flags to Scintilla flags.

    Fixes vs. the original: removed an unused local dict named 'map' and an
    unused 'length' variable; renamed the local 'str' (which shadowed the
    builtin); removed debug prints; in replace-all, the search end position
    is now re-read after each replacement, since replacing text with a
    string of a different length changes the document length."""
    et = event.GetEventType()
    searchStr = event.GetFindString()
    # Translate wx dialog flags into Scintilla search flags (shared by all branches).
    flags = 0
    if event.GetFlags() & wx.FR_MATCHCASE:
        flags = flags | stc.STC_FIND_MATCHCASE
    if event.GetFlags() & wx.FR_WHOLEWORD:
        flags = flags | stc.STC_FIND_WHOLEWORD
    # find/find next
    if et == wx.wxEVT_COMMAND_FIND or et == wx.wxEVT_COMMAND_FIND_NEXT:
        # Search range depends on direction (FR_DOWN) and find vs. find-next:
        # find-next continues from the caret, plain find scans the whole document.
        if event.GetFlags() & wx.FR_DOWN:
            if et == wx.wxEVT_COMMAND_FIND_NEXT:
                startPos = self.codeSubWin.GetCurrentPos()
            else:
                startPos = 0
            endPos = self.codeSubWin.GetTextLength()
        else:
            if et == wx.wxEVT_COMMAND_FIND_NEXT:
                startPos = self.codeSubWin.GetCurrentPos() - 1
            else:
                startPos = self.codeSubWin.GetTextLength()
            endPos = 0
        pos = self.codeSubWin.FindText(startPos, endPos, searchStr, flags)
        if pos >= 0:
            self.codeSubWin.SetSelection(pos, pos + len(searchStr))
            self.codeSubWin.EnsureCaretVisible()
        else:
            wx.MessageBox("Reached end of document", "Find", wx.ICON_EXCLAMATION | wx.OK, self.codeSubWin)
    # replace (first match from the top of the document)
    elif et == wx.wxEVT_COMMAND_FIND_REPLACE:
        pos = self.codeSubWin.FindText(0, self.codeSubWin.GetTextLength(), searchStr, flags)
        if pos >= 0:
            self.codeSubWin.SetSelection(pos, pos + len(searchStr))
            self.codeSubWin.ReplaceSelection(event.GetReplaceString())
            self.codeSubWin.EnsureCaretVisible()
        else:
            wx.MessageBox("Reached end of document", "Replace", wx.ICON_EXCLAMATION | wx.OK, self.codeSubWin)
    # replace all
    elif et == wx.wxEVT_COMMAND_FIND_REPLACE_ALL:
        initPos = self.codeSubWin.GetCurrentPos()
        endPos = self.codeSubWin.GetTextLength()
        numTokens = 0
        pos = self.codeSubWin.FindText(0, endPos, searchStr, flags)
        while pos >= 0:
            numTokens = numTokens + 1
            self.codeSubWin.SetSelection(pos, pos + len(searchStr))
            self.codeSubWin.ReplaceSelection(event.GetReplaceString())
            self.codeSubWin.EnsureCaretVisible()
            # Bug fix: the document length changes when the replacement string
            # has a different length than the search string -- re-read it.
            endPos = self.codeSubWin.GetTextLength()
            pos = self.codeSubWin.FindText(pos + len(searchStr), endPos, searchStr, flags)
        # Restore the caret to where the user was before the replace-all.
        self.codeSubWin.GotoPos(initPos)
        wx.MessageBox("%d instance(s) replaced" % (numTokens), "Replace All", wx.ICON_EXCLAMATION | wx.OK, self.codeSubWin)
# ---------------------------------------------------------------------
# HTML Sub Window
#
class PsycolliderHTMLSubWin(wx.html.HtmlWindow):
    """HTML view embedded in help windows. Keeps its own list of page titles,
    parallel to the built-in page history, so the parent frame's title can
    follow back/forward navigation."""
    def __init__ (self,parent):
        wx.html.HtmlWindow.__init__(self,parent)
        self.parent = parent
        self.Bind(wx.EVT_CHAR, self.OnChar) # this hack is to enable the alt+. shortcut
        # NOTE(review): uses bare 'html' here but 'wx.html' elsewhere; this
        # relies on a module-level 'html' import not visible in this chunk -- confirm.
        self.Bind(html.EVT_HTML_LINK_CLICKED, self.OnClicked)
        self.titles = [parent.GetTitle()]  # title history, parallel to page history
        self.titlePos = 0                  # index of the current page in self.titles
    def OnChar(self, event):
        """Keyboard shortcuts: Alt+. stops sclang; Alt+Left/Right navigate history."""
        if event.GetKeyCode() == 0x2e and event.AltDown():  # 0x2e == '.'
            self.GetParent().OnStop(None)
        elif event.GetKeyCode() == wx.WXK_LEFT and event.AltDown():
            self.HistoryBack()
        elif event.GetKeyCode() == wx.WXK_RIGHT and event.AltDown():
            self.HistoryForward()
        else:
            event.Skip()
    # this allows us to correctly set the title of the parent window
    def OnClicked(self, event):
        """Follow a clicked link and record the new page's title."""
        # clicking on a link effectively removes forward history
        self.titles = self.titles[:self.titlePos+1]
        info = event.GetLinkInfo()
        href = info.GetHref()
        self.LoadPage(href)
        # Title is the link target's file name without extension.
        pageTitle = os.path.splitext(os.path.basename(href))[0]
        self.parent.SetTitle(pageTitle)
        self.titles.append(pageTitle)
        self.titlePos += 1
    def GoForward(self, event):
        """Navigate forward and update the parent frame's title to match."""
        if self.HistoryCanForward():
            self.HistoryForward()
            self.titlePos += 1
            self.parent.SetTitle(self.titles[self.titlePos])
    def GoBack(self, event):
        """Navigate back and update the parent frame's title to match."""
        if self.HistoryCanBack():
            self.HistoryBack()
            self.titlePos -= 1
            self.parent.SetTitle(self.titles[self.titlePos])
    def GoHome(self, event):
        """Load the top-level Help.html page and record it in the title history."""
        filePath = os.path.join(gHelpFolder,"Help.html")
        self.parent.SetTitle("Help")
        self.LoadPage(filePath)
        self.titles.append("Help")
        self.titlePos += 1
# ---------------------------------------------------------------------
# HTML Window
# accomodates HTML sub window
class PsycolliderHTMLWin(PsycolliderWindow):
    """Frame hosting a PsycolliderHTMLSubWin, with a Navigation menu and the
    file-save / edit menus stripped (help pages are read-only)."""
    def __init__ (self,parent,id,title,pos=wx.DefaultPosition,size=wx.DefaultSize):
        PsycolliderWindow.__init__(self, parent, id, title)
        self.fileMenu.Remove(self.saveFile.GetId()) # Remove unnecessary menu items
        self.fileMenu.Remove(self.saveFileAs.GetId())
        self.htmlSubWin = PsycolliderHTMLSubWin(self)
        # Remove edit menu
        self.menubar.Remove(1)
        # Add navigation menu to HTML windows
        self.navMenu = wx.Menu()
        self.menubar.Insert(3, self.navMenu, "&Navigation")
        self.home = wx.MenuItem(self.navMenu, -1, '&Home')
        self.forward = wx.MenuItem(self.navMenu, -1, '&Forward\tAlt+Right')
        self.back = wx.MenuItem(self.navMenu, -1, '&Back\tAlt+Left')
        self.navMenu.AppendItem(self.home)
        self.navMenu.AppendItem(self.forward)
        self.navMenu.AppendItem(self.back)
        self.SetMenuBar(self.menubar)
        self.Bind(wx.EVT_MENU, self.htmlSubWin.GoHome, id=self.home.GetId())
        self.Bind(wx.EVT_MENU, self.htmlSubWin.GoForward, id=self.forward.GetId())
        self.Bind(wx.EVT_MENU, self.htmlSubWin.GoBack, id=self.back.GetId())
    def GetSelectedText(self):
        # Current selection of the HTML view as plain text.
        return self.htmlSubWin.SelectionToText()
    def GetCurLineAsString(self):
        # NOTE(review): GetInsertionPoint/PositionToXY/SelectLine do not look
        # like wx.html.HtmlWindow methods -- confirm this path is ever reached.
        posInText = self.htmlSubWin.GetInsertionPoint()
        (x,y) = self.htmlSubWin.PositionToXY(posInText)
        return self.htmlSubWin.SelectLine(y)
    def GetSelectedTextOrLine(self):
        """Returns selected text (never falls back to the current line here)."""
        return str(self.GetSelectedText())
    def SetSubWinFocus(self):
        # Give keyboard focus to the embedded HTML view.
        self.htmlSubWin.SetFocus()
# ---------------------------------------------------------------------
# Psycollider Post Window
class PsycolliderPostWindow(PsycolliderWindow):
    """The main post (log) window. Displays sclang output in a read-only
    text control; closing it shuts down the whole application.

    Fixes vs. the original: SaveFile's error path referenced the undefined
    name 'filePath' (NameError) instead of self.filePath; the bare 'except:'
    clauses were narrowed to 'except Exception:'; the duplicated save logic
    was factored into _WriteLogToFile; and the quit-confirmation dialog was
    given OK/CANCEL buttons so the user can actually cancel."""
    log = None  # The wx.TextCtrl object that displays post info

    def __init__(self, parent, id, title):
        """Create the post window, restoring its saved size and position."""
        # init (no maximise button)
        PsycolliderWindow.__init__(self, parent, id, title, wx.MINIMIZE_BOX | wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION)
        self.config.SetPath("/WindowSettings")
        sizeX = self.config.ReadInt('PostWindow-sizeX', DEFAULT_SIZEX)
        sizeY = self.config.ReadInt('PostWindow-sizeY', DEFAULT_SIZEY)
        posX = self.config.ReadInt('PostWindow-posX', DEFAULT_POSX)
        posY = self.config.ReadInt('PostWindow-posY', DEFAULT_POSY)
        # check that position is > 0 (a saved position can be off-screen)
        if posX < 0:
            posX = DEFAULT_POSX
        if posY < 0:
            posY = DEFAULT_POSY
        self.SetSize(wx.Size(sizeX, sizeY))
        self.SetPosition(wx.Point(posX, posY))
        # Remove menu items that make no sense for the read-only post window
        self.fileMenu.Remove(self.htmlToCode.GetId())
        self.langMenu.Remove(self.run.GetId())
        self.langMenu.Remove(self.stop.GetId())
        self.langMenu.Remove(self.openClassDef.GetId())
        self.langMenu.Remove(self.impOf.GetId())
        self.langMenu.Remove(self.refsTo.GetId())
        self.langMenu.Remove(self.eval.GetId())
        self.menubar.Remove(1)
        mainPanel = wx.Panel(self, -1)
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainPanel.SetSizer(mainSizer)
        self.log = wx.TextCtrl(mainPanel, -1, style = wx.TE_MULTILINE | wx.TE_READONLY)
        mainSizer.Add(self.log, proportion = 1, flag = wx.EXPAND)
        self.Show(True)

    def OnCloseWindow(self, event):
        """Confirm with the user, then persist geometry and shut everything down."""
        # Bug fix: the original dialog passed no style, so it had no Cancel
        # button and quitting could not be declined.
        dlg = wx.MessageDialog(self, "This will shut down SuperCollider, stop all servers and close all code windows.\n Do you want to quit?", style = wx.OK | wx.CANCEL | wx.ICON_QUESTION)
        reply = dlg.ShowModal()
        dlg.Destroy()
        if reply == wx.ID_OK:
            PsycolliderWindow.OnCloseWindow(self, event)
            # Save the window geometry for the next session
            size = self.GetSize()
            pos = self.GetPosition()
            self.config.SetPath("/WindowSettings")
            self.config.WriteInt('PostWindow-sizeX', size.x)
            self.config.WriteInt('PostWindow-sizeY', size.y)
            self.config.WriteInt('PostWindow-posX', pos.x)
            self.config.WriteInt('PostWindow-posY', pos.y)
            wx.GetApp().Shutdown()
        # else: the user cancelled -- keep running

    def _WriteLogToFile(self, path):
        """Write the entire log contents to path (raises on I/O errors)."""
        f = open(path, "w")
        try:
            f.write(self.log.GetRange(0, self.log.GetLastPosition()))
        finally:
            f.close()

    def SaveFile(self):
        """Save the log to the current file, prompting if none is set yet."""
        if self.filePath == "":
            self.SaveFileAs()
        else:
            try:
                self._WriteLogToFile(self.filePath)
            except Exception:
                # Bug fix: original used the undefined name 'filePath' here
                wx.MessageBox("Error: Could not save file " + self.filePath)

    def SaveFileAs(self):
        """Ask for a file name and save the log contents to it."""
        fileDlg = wx.FileDialog(self, style=wx.SAVE)
        if fileDlg.ShowModal() == wx.ID_OK:
            self.filePath = fileDlg.GetPath()
            try:
                self._WriteLogToFile(self.filePath)
            except Exception:
                # Todo: better error handling? Just print error message for now
                wx.MessageBox("Error: Could not save file " + self.filePath)
# ---------------------------------------------------------------------
# Psycollider
class Psycollider(wx.App):
    """Application object: owns the post window, the list of open document
    windows, the wx.Config settings and the recent-file history, and acts
    as the bridge between the GUI and the PySCLang interpreter."""
    theMainFrame = None  # Points to the post window object
    openWindows = []     # List of all windows currently open
    config = None        # Main wx.Config object, used by all windows
    fileHistory = None   # wx.FileHistory object

    def OnInit(self):
        """wx.App startup hook: load settings and file history, create the
        post window, install sclang's log sink and start the interpreter.
        Returns False (aborting startup) when no class library is found."""
        self.config = wx.Config()
        # File history
        self.fileHistory = wx.FileHistory()
        self.config.SetPath("/RecentFiles")
        self.fileHistory.Load(self.config)
        # Create post window
        self.theMainFrame = PsycolliderPostWindow(None, -1, "SuperCollider (Post Window)")
        self.theMainFrame.Show(True)
        self.SetTopWindow(self.theMainFrame)
        # enable images for html
        wx.InitAllImageHandlers()
        # Set the log sink function (writes to post window)
        # On windows, we can write directly to it, and using the PostText function
        # causes the post window to be updated slightly later which doesn't look too nice.
        #
        # Can't do this on Linux, as gtk is not thread safe, so must use PostText.
        if(os.name == 'nt'):
            PySCLang.setSCLogSink(WriteInLogWindow)
        else:
            PySCLang.setSCLogSink(PostText)
        PySCLang.setPyPrOpenWinTextFile(OpenTextFile)
        if not self.ChangeDirToSCClassLibraryFolder():
            return False
        PySCLang.start()
        # NOTE(review): the returned pair is unused here -- confirm whether
        # this call is needed for a side effect or can be dropped.
        (addr, port) = self.GetServerAddressAndPort()
        return True

    def doFileHistory(self, event):
        """Open a file from file history"""
        fileNumber = event.GetId() - wx.ID_FILE1
        filename = self.fileHistory.GetHistoryFile(fileNumber)
        newWindow = self.OpenFile(filename)
        if newWindow != None:
            self.openWindows.append(newWindow)
            self.AddFileToHistory(filename) # add it back to the history so it will be moved up the list

    def GetServerAddressAndPort(self):
        """Return the default scsynth (address, port) pair, as strings.
        NOTE(review): "127.1.0.1" looks like a typo for "127.0.0.1" -- confirm
        before relying on this value (it is currently unused by OnInit)."""
        return ("127.1.0.1", "57110")

    def ChangeDirToSCClassLibraryFolder(self):
        """Make the SCClassLibrary's parent folder the current directory,
        asking the user to locate it when cwd and config both fail.
        Returns True on success, False when no library could be found."""
        # first, we check if the current working folder
        # contains an item called 'SCClassLibrary'
        curPath = os.getcwd()
        listOfFolders = os.listdir(curPath)
        # if the cwd contains 'SCClassLibrary', we're done
        if 'SCClassLibrary' in listOfFolders:
            return True
        # uniqueName is what gets returned from config.Read(...)
        # if nothing was stored in the config yet
        uniqueName = "{1FB0EC09-A883-4684-AD73-1D49A98A89DE}"
        self.config.SetPath("/GeneralSettings")
        classLibPath = self.config.Read("SCClassLibraryPath", uniqueName)
        leafName = (os.path.split(classLibPath))[1]
        # if the folder stored in the config is actually an existing
        # folder called 'SCClassLibrary', we change cwd to that
        # folder and we're done
        if os.path.isdir(classLibPath) and leafName == 'SCClassLibrary':
            classLibPath_split = os.path.split(classLibPath)
            classLibParentFolder = classLibPath_split[0]
            os.chdir(classLibParentFolder)
            return True
        # if something was stored in the config, but does not exist
        # anymore or is not correct, let's warn the user
        if classLibPath != uniqueName:
            wx.MessageBox("The path stored in the application preferences is not a valid SCClassLibrary folder. You will now be requested to select an existing SCClassLibrary folder","Error", wx.OK | wx.ICON_ERROR)
        # ask the user to locate the folder
        continueLookingForFolder = True
        classLibFolderFound = False
        while continueLookingForFolder:
            dlg = wx.DirDialog(None, "Please locate the SCClassLibrary")
            if dlg.ShowModal() == wx.ID_CANCEL:
                wx.MessageBox("Sorry. No class library available. SuperCollider will not work correctly","Error", wx.OK | wx.ICON_ERROR)
                continueLookingForFolder = False
            else:
                classLibPath = dlg.GetPath()
                leafName = (os.path.split(classLibPath))[1]
                if leafName != 'SCClassLibrary':
                    wx.MessageBox("The folder needs to be called SCClassLibrary for SuperCollider to work correctly", "Error", wx.OK | wx.ICON_ERROR)
                else:
                    continueLookingForFolder = False
                    classLibFolderFound = True
        # only if a valid SCClassLibrary folder was found, then
        # set the current folder as its parent
        if classLibFolderFound:
            self.config.Write("SCClassLibraryPath",classLibPath)
            classLibPath_split = os.path.split(classLibPath)
            classLibParentFolder = classLibPath_split[0]
            os.chdir(classLibParentFolder)
            return True
        else:
            return False

    def NewCodeWindow(self):
        """Create, register, show and focus a new empty code window."""
        window = PsycolliderCodeWin(self.theMainFrame, -1, "Untitled %d" % (len(self.openWindows)+1))
        self.openWindows.append(window)
        window.Show(True)
        window.SetSubWinFocus()
        return window

    def NewHTMLWindow(self, filepath):
        """Create, register, show and focus a new HTML (help) window,
        titled after filepath's base name."""
        window = PsycolliderHTMLWin(self.theMainFrame, -1,
            os.path.splitext(os.path.basename(filepath))[0])
        self.openWindows.append(window)
        window.Show(True)
        window.SetSubWinFocus()
        return window

    def ClosedWindow(self, window):
        """Forget a window that has been closed (ignored if not registered)."""
        try:
            self.openWindows.remove(window)
        except:
            pass

    def HtmlToCode(self, window):
        """Replace an HTML window with a code window holding its plain text."""
        if type(window) == PsycolliderHTMLWin:
            text = window.htmlSubWin.ToText()
            window.Destroy()
            newWindow = PsycolliderCodeWin(self.theMainFrame, -1, "Untitled %d" % (len(self.openWindows)+1))
            self.openWindows.append(newWindow)
            newWindow.codeSubWin.AddText(text)
            newWindow.Show(True)
            newWindow.SetSubWinFocus()
            return newWindow

    def OpenFile(self, path=''):
        """Open path (showing a file dialog when path is empty) and return
        the new window, or None when the file could not be read."""
        if path == '':
            fileDlg = wx.FileDialog(self.theMainFrame, style=wx.OPEN)
            if not fileDlg.ShowModal() == wx.ID_OK:
                return
            path = fileDlg.GetPath()
            # Only dialog-opened files are recorded here; doFileHistory
            # re-adds history entries itself.
            self.AddFileToHistory(path)
        try:
            file = open(path ,"r")
            textContent = file.read()
            file.close()
        except:
            # Todo: better error handling? Just print error message for now
            wx.MessageBox("Psycollider Error: Could not open file " + path)
            return None
        if textContent[0:5] == '{\\rtf':
            # RTF document: unsupported, show a notice in a code window
            win = self.NewCodeWindow()
            win.codeSubWin.AddTextUTF8('Sorry, still no RTF support, wxRichTextControl does not yet support reading RTF files...')
            win.isModified = False
            return win
        elif (textContent.find('<html') >= 0 or textContent.find('<HTML') >= 0):
            win = self.NewHTMLWindow(path)
            win.htmlSubWin.LoadPage(path)
            return win
        else:
            # everything else is plain text code window
            win = self.NewCodeWindow()
            win.codeSubWin.AddTextUTF8(textContent)
            win.filePath = path
            win.SetTitle(path)
            win.isModified = 0
            return win

    def StopServer(self):
        """Ask the default scsynth server to quit, via sclang."""
        if PySCLang.compiledOK():
            PySCLang.setCmdLine('s.sendMsg("/quit");')
            PySCLang.sendMain("interpretPrintCmdLine")

    def StopSwingOSC(self):
        """Ask the SwingOSC GUI server to quit, via sclang."""
        if PySCLang.compiledOK():
            PySCLang.setCmdLine('SwingOSC.default.sendMsg("/quit");')
            PySCLang.sendMain("interpretPrintCmdLine")

    def Run(self):
        # Trigger sclang's 'run' action.
        PySCLang.sendMain("run");

    def Stop(self):
        # Trigger sclang's 'stop' action (Cmd-.).
        PySCLang.sendMain("stop");

    def CompileLibrary(self):
        """Recompile the class library after shutting both servers down."""
        self.StopServer()
        self.StopSwingOSC()
        time.sleep(1)  # let the /quit messages go out before recompiling
        PySCLang.compileLibrary()

    def OpenClassDef(self, text):
        # Ask sclang to open the class definition named by text.
        PySCLang.setCmdLine(text)
        PySCLang.sendMain("openWinCodeFile")

    def ImpOf(self, text):
        # Show implementations of the method named by text.
        PySCLang.setCmdLine(text)
        PySCLang.sendMain("methodTemplates")

    def RefsTo(self, text):
        # Show references to the method named by text.
        PySCLang.setCmdLine(text)
        PySCLang.sendMain("methodReferences")

    def Eval(self, text):
        # Interpret text as sclang code and print the result.
        PySCLang.setCmdLine(text)
        PySCLang.sendMain("interpretPrintCmdLine")

    def GoToHelpFile(self, sel=""):
        """Search the help folders for a page matching sel and open it,
        falling back to the top-level Help.html page."""
        # TODO : test this : most help files don't open. remove trailing and leading spaces from sel, too, since wxTextCtrl is behaving strangely
        foundFilePath = ""
        filePath = ""
        # Operator names can't be file names; map them to spelled-out pages.
        if sel == "-" : sel = "subtraction" # "subtraction.rtf"
        elif sel == "/" : sel = "division" # "division.rtf"
        elif sel == "*" : sel = "multiplication" # from "*.rtf"
        elif sel == "**": sel = "exponentiation" # from "**.rtf"
        elif sel == "<" : sel = "lessthan" # from "<.rtf"
        elif sel == "<=": sel = "lessorequalthan" # from "<=.rtf"
        elif sel == ">" : sel = "greaterthan" # from ">.rtf"
        elif sel == ">=": sel = "greaterorequalthan" # from ">=.rtf"
        elif sel == "%" : sel = "modulo" # from "%.rtf"
        if sel != "":
            for helpFolder in [gHelpFolder, gUserExtensionFolder]:
                for folderPath, foldersInPath, fileNamesInFolder in os.walk(helpFolder):
                    # don't visit CVS directories
                    if 'CVS' in foldersInPath:
                        foldersInPath.remove('CVS')
                    # don't visit .svn directories
                    if '.svn' in foldersInPath:
                        foldersInPath.remove('.svn')
                    for fileName in fileNamesInFolder:
                        filePath = os.path.join(folderPath, fileName)
                        if fileName == sel + ".help.html":
                            foundFilePath = filePath
                            break
                        if fileName == sel + ".html":
                            foundFilePath = filePath
                            break
                    # for fileName
                    # if file is found, let's break
                    if foundFilePath != "":
                        break
                # for folderPath, ....
                # if file is found, let's break
                if foundFilePath != "":
                    break
        if foundFilePath == "":
            foundFilePath = os.path.join(gHelpFolder,"Help.html")
        self.OpenFile(foundFilePath)

    def ClearPostWindow(self):
        # Empty the post window's log control.
        self.theMainFrame.log.Clear()

    def SetDefaultWindowSize(self, sizeX, sizeY):
        """Persist the default size used for new windows."""
        self.config.SetPath("/WindowSettings")
        self.config.WriteInt('DefaultSizeX', sizeX)
        self.config.WriteInt('DefaultSizeY', sizeY)
        WriteInLogWindow("Set default window size to " + str(sizeX) + " x " + str(sizeY) + "\n")

    def ClearRecentFileList(self):
        """Remove every entry from the recent-file history."""
        numFiles = self.fileHistory.GetCount()
        for i in range(numFiles):
            self.fileHistory.RemoveFileFromHistory(0) # remove the first file every time

    def AddFileToHistory(self, path):
        # Record path in (or move it to the top of) the recent-file history.
        self.fileHistory.AddFileToHistory(path)

    def GetOpenWindows(self):
        return self.openWindows

    def Shutdown(self):
        """Persist the recent-file history and stop scsynth/SwingOSC."""
        # Recent file list
        self.config.SetPath("/RecentFiles")
        self.fileHistory.Save(self.config)
        del self.fileHistory
        # stop scsynth and swingosc
        self.StopServer()
        self.StopSwingOSC()
# ---------------------------------------------------------------------
# WriteInLogWindow
def WriteInLogWindow(text):
    """Append text to the post window's log control, or to stdout when the
    main frame does not exist yet (e.g. during startup).

    Fix: identity comparison with None ('is None' instead of '== None')."""
    frame = wx.GetApp().theMainFrame
    if frame is None:
        sys.stdout.write(text)
    else:
        frame.log.AppendText(text)
def PostText(text):
    # Marshal the append onto the GUI thread -- per the OnInit comments, gtk
    # is not thread safe, so sclang's log callback must not touch widgets directly.
    wx.CallAfter(WriteInLogWindow, text)
# ---------------------------------------------------------------------
def OpenTextFile(path, rangeStart, rangeSize):
    """Open path in a new window and return it; called from sclang via
    PySCLang. rangeStart/rangeSize are currently unused (the selection
    call below is commented out).

    Fix: the original error message passed a bare "%s" without substituting
    the path, so the dialog literally showed "%s"; it is now formatted.
    Also uses 'is None' instead of '== None'."""
    if wx.GetApp().theMainFrame is None:
        wx.MessageBox("Cannot open %s since the main window is not created yet" % path, "Error", wx.OK | wx.ICON_ERROR)
        return None
    codeWin = wx.GetApp().OpenFile(path)
    #codeWin.SelectRange(rangeStart,rangeSize)
    return codeWin
# ---------------------------------------------------------------------
# Main
# Application entry point: create the wx app object and run its event loop.
app = Psycollider(0)
app.MainLoop()
|
GameOfLife/WFSCollider
|
editors/Psycollider/Psycollider.py
|
Python
|
gpl-3.0
| 60,560
|
[
"VisIt"
] |
caed73179b87bd8d16e515b313caab28140acc3d76f40ab4a1a195e10e5306fd
|
"""
Downloads feeds, keys, packages and icons.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, sys
from zeroinstall import support
from zeroinstall.support import tasks, basedir, portable_rename
from zeroinstall.injector.namespaces import XMLNS_IFACE, config_site
from zeroinstall.injector import model
from zeroinstall.injector.model import Recipe, SafeException, escape, DistributionSource
from zeroinstall.injector.iface_cache import PendingFeed, ReplayAttack
from zeroinstall.injector.handler import NoTrustedKeys
from zeroinstall.injector import download
def _escape_slashes(path):
"""@type path: str
@rtype: str"""
return path.replace('/', '%23')
def _get_feed_dir(feed):
    """Compute the mirror-relative directory for a feed URL
    (the algorithm from 0mirror).
    @type feed: str
    @rtype: str"""
    if '#' in feed:
        raise SafeException(_("Invalid URL '%s'") % feed)
    scheme, rest = feed.split('://', 1)
    assert '/' in rest, "Missing / in %s" % feed
    domain, rest = rest.split('/', 1)
    # Reject empty components and hidden/relative path tricks.
    for part in (scheme, domain, rest):
        if not part or part.startswith('.'):
            raise SafeException(_("Invalid URL '%s'") % feed)
    return '/'.join(['feeds', scheme, domain, _escape_slashes(rest)])
class KeyInfoFetcher(object):
    """Fetches information about a GPG key from a key-info server.
    See L{Fetcher.fetch_key_info} for details.
    @since: 0.42
    Example:
    >>> kf = KeyInfoFetcher(fetcher, 'https://server', fingerprint)
    >>> while True:
    print kf.info
    if kf.blocker is None: break
    print kf.status
    yield kf.blocker
    """
    def __init__(self, fetcher, server, fingerprint):
        """@type fetcher: L{Fetcher}
        @type server: str
        @type fingerprint: str"""
        self.fingerprint = fingerprint
        self.info = []       # DOM nodes describing the key; grows as results arrive
        self.blocker = None  # set below when a fetch is in flight; None once done
        # No server configured: leave self.info empty and self.blocker None.
        if server is None: return
        self.status = _('Fetching key information from %s...') % server
        dl = fetcher.download_url(server + '/key/' + fingerprint)
        from xml.dom import minidom
        # NOTE(review): 'async' became a keyword in Python 3.7; this decorator
        # spelling targets older Python versions.
        @tasks.async
        def fetch_key_info():
            tempfile = dl.tempfile
            try:
                yield dl.downloaded
                self.blocker = None
                tasks.check(dl.downloaded)
                tempfile.seek(0)
                doc = minidom.parse(tempfile)
                if doc.documentElement.localName != 'key-lookup':
                    raise SafeException(_('Expected <key-lookup>, not <%s>') % doc.documentElement.localName)
                self.info += doc.documentElement.childNodes
            except Exception as ex:
                # Report the failure as a synthetic "bad" vote so callers can
                # still render something for this key.
                doc = minidom.parseString('<item vote="bad"/>')
                root = doc.documentElement
                root.appendChild(doc.createTextNode(_('Error getting key information: %s') % ex))
                self.info.append(root)
            finally:
                tempfile.close()
        self.blocker = fetch_key_info()
class Fetcher(object):
"""Downloads and stores various things.
@ivar config: used to get handler, iface_cache and stores
@type config: L{config.Config}
@ivar key_info: caches information about GPG keys
@type key_info: {str: L{KeyInfoFetcher}}
"""
__slots__ = ['config', 'key_info', '_scheduler', 'external_store', 'external_fetcher']
def __init__(self, config):
    """@type config: L{zeroinstall.injector.config.Config}"""
    assert config.handler, "API change!"
    self.config = config
    self.key_info = {}      # fingerprint -> KeyInfoFetcher cache
    self._scheduler = None  # created lazily by the 'scheduler' property
    # Optional external helper programs, configured via the environment.
    self.external_store = os.environ.get('ZEROINSTALL_EXTERNAL_STORE')
    self.external_fetcher = os.environ.get('ZEROINSTALL_EXTERNAL_FETCHER')
@property
def handler(self):
    # Convenience alias for self.config.handler.
    return self.config.handler
@property
def scheduler(self):
    # Lazily create the shared DownloadScheduler on first access.
    if self._scheduler is None:
        from . import scheduler
        self._scheduler = scheduler.DownloadScheduler()
    return self._scheduler
# (force is deprecated and ignored)
@tasks.async
def cook(self, required_digest, recipe, stores, force = False, impl_hint = None, dry_run = False, may_use_mirror = True):
    """Follow a Recipe.
    @type required_digest: str
    @type recipe: L{Recipe}
    @type stores: L{zeroinstall.zerostore.Stores}
    @type force: bool (deprecated and ignored)
    @param impl_hint: the Implementation this is for (as a hint for the GUI, and to allow local files)
    @type dry_run: bool
    @type may_use_mirror: bool
    @see: L{download_impl} uses this method when appropriate"""
    # Maybe we're taking this metaphor too far?
    # Start a download for each ingredient
    blockers = []
    steps = []
    try:
        for stepdata in recipe.steps:
            cls = StepRunner.class_for(stepdata)
            step = cls(stepdata, impl_hint = impl_hint, may_use_mirror = may_use_mirror)
            step.prepare(self, blockers)
            steps.append(step)
        # Wait until every step's download has completed.
        while blockers:
            yield blockers
            tasks.check(blockers)
            blockers = [b for b in blockers if not b.happened]
        if self.external_store:
            # Note: external_store will not work with non-<archive> steps.
            streams = [step.stream for step in steps]
            self._add_to_external_store(required_digest, recipe.steps, streams)
        else:
            # Create an empty directory for the new implementation
            store = stores.stores[0]
            tmpdir = store.get_tmp_dir_for(required_digest)
            try:
                # Unpack each of the downloaded archives into it in turn
                for step in steps:
                    step.apply(tmpdir)
                # Check that the result is correct and store it in the cache
                stores.check_manifest_and_rename(required_digest, tmpdir, dry_run=dry_run)
                tmpdir = None
            finally:
                # If unpacking fails, remove the temporary directory
                if tmpdir is not None:
                    support.ro_rmtree(tmpdir)
    finally:
        # Always close every step's stream, even on failure part-way through.
        for step in steps:
            try:
                step.close()
            except IOError as ex:
                # Can get "close() called during
                # concurrent operation on the same file
                # object." if we're unlucky (Python problem).
                logger.info("Failed to close: %s", ex)
def _get_mirror_url(self, feed_url, resource):
    """Return the mirror URL for *resource* of *feed_url*, or None when
    mirroring is disabled, the scheme is not http(s), or the feed is local.
    @type feed_url: str
    @type resource: str
    @rtype: str"""
    mirror_root = self.config.mirror
    if mirror_root is None:
        return None
    is_http = feed_url.startswith('http://') or feed_url.startswith('https://')
    if not is_http:
        return None
    if support.urlparse(feed_url).hostname == 'localhost':
        return None
    return '%s/%s/%s' % (mirror_root, _get_feed_dir(feed_url), resource)
def get_feed_mirror(self, url):
    """Return the URL of a mirror for this feed, or None when mirroring
    is disabled or unsupported for this URL.
    @type url: str
    @rtype: str"""
    return self._get_mirror_url(url, 'latest.xml')
def _get_archive_mirror(self, source):
    """Return the mirror URL for an archive download, or None when the
    mirror is disabled or the archive is served from localhost.
    @type source: L{model.DownloadSource}
    @rtype: str"""
    mirror_root = self.config.mirror
    if mirror_root is None:
        return None
    if support.urlparse(source.url).hostname == 'localhost':
        return None
    # EAFP import: urllib.parse on Python 3, urllib on Python 2.
    try:
        from urllib.parse import quote
    except ImportError:
        from urllib import quote
    escaped = quote(source.url.replace('/', '#'), safe = '')
    return '{mirror}/archive/{archive}'.format(mirror = mirror_root, archive = escaped)
def _get_impl_mirror(self, impl):
    """Return the mirror URL for a pre-packed implementation, or None.
    @type impl: L{zeroinstall.injector.model.ZeroInstallImplementation}
    @rtype: str"""
    return self._get_mirror_url(impl.feed.url, 'impl/' + _escape_slashes(impl.id))
@tasks.async
def get_packagekit_feed(self, feed_url):
    """Send a query to PackageKit (if available) for information about this package.
    On success, the result is added to iface_cache.
    @type feed_url: str"""
    assert feed_url.startswith('distribution:'), feed_url
    # The wrapped master feed URL follows the 'distribution:' prefix.
    master_feed = self.config.iface_cache.get_feed(feed_url.split(':', 1)[1])
    if master_feed:
        fetch = self.config.iface_cache.distro.fetch_candidates(master_feed)
        if fetch:
            yield fetch
            tasks.check(fetch)
        # Force feed to be regenerated with the new information
        self.config.iface_cache.get_feed(feed_url, force = True)
def download_and_import_feed(self, feed_url, iface_cache = None):
    """Download the feed, download any required keys, confirm trust if needed and import.
    @param feed_url: the feed to be downloaded
    @type feed_url: str
    @param iface_cache: (deprecated)
    @type iface_cache: L{zeroinstall.injector.iface_cache.IfaceCache} | None
    @rtype: L{zeroinstall.support.tasks.Blocker}"""
    from .download import DownloadAborted
    assert iface_cache is None or iface_cache is self.config.iface_cache
    if not self.config.handler.dry_run:
        try:
            self.config.iface_cache.mark_as_checking(feed_url)
        except OSError as ex:
            # Propagate the failure to the caller through a pre-triggered blocker.
            retval = tasks.Blocker("mark_as_checking")
            retval.trigger(exception = (ex, None))
            return retval
    logger.debug(_("download_and_import_feed %(url)s"), {'url': feed_url})
    assert not os.path.isabs(feed_url)
    if feed_url.startswith('distribution:'):
        # Distribution feeds come from PackageKit, not from the network.
        return self.get_packagekit_feed(feed_url)
    primary = self._download_and_import_feed(feed_url, use_mirror = False, timeout = 5)
    @tasks.named_async("monitor feed downloads for " + feed_url)
    def wait_for_downloads(primary):
        # Download just the upstream feed, unless it takes too long...
        timeout = primary.dl.timeout
        yield primary, timeout
        tasks.check(timeout)
        try:
            tasks.check(primary)
            if primary.happened:
                return # OK, primary succeeded!
            # OK, maybe it's just being slow...
            logger.info("Feed download from %s is taking a long time.", feed_url)
            primary_ex = None
        except NoTrustedKeys as ex:
            raise # Don't bother trying the mirror if we have a trust problem
        except ReplayAttack as ex:
            raise # Don't bother trying the mirror if we have a replay attack
        except DownloadAborted as ex:
            raise # Don't bother trying the mirror if the user cancelled
        except SafeException as ex:
            # Primary failed
            primary = None
            primary_ex = ex
            logger.warning(_("Feed download from %(url)s failed: %(exception)s"), {'url': feed_url, 'exception': ex})
        # Start downloading from mirror...
        mirror = self._download_and_import_feed(feed_url, use_mirror = True)
        # Wait until both mirror and primary tasks are complete...
        while True:
            blockers = list(filter(None, [primary, mirror]))
            if not blockers:
                break
            yield blockers
            if primary:
                try:
                    tasks.check(primary)
                    if primary.happened:
                        primary = None
                        # No point carrying on with the mirror once the primary has succeeded
                        if mirror:
                            logger.info(_("Primary feed download succeeded; aborting mirror download for %s") % feed_url)
                            mirror.dl.abort()
                except SafeException as ex:
                    primary = None
                    primary_ex = ex
                    logger.info(_("Feed download from %(url)s failed; still trying mirror: %(exception)s"), {'url': feed_url, 'exception': ex})
            if mirror:
                try:
                    tasks.check(mirror)
                    if mirror.happened:
                        mirror = None
                        if primary_ex:
                            # We already warned; no need to raise an exception too,
                            # as the mirror download succeeded.
                            primary_ex = None
                except ReplayAttack as ex:
                    logger.info(_("Version from mirror is older than cached version; ignoring it: %s"), ex)
                    mirror = None
                    primary_ex = None
                except SafeException as ex:
                    logger.info(_("Mirror download failed: %s"), ex)
                    mirror = None
        # Only re-raise the primary's failure if the mirror didn't save us.
        if primary_ex:
            raise primary_ex
    return wait_for_downloads(primary)
def _download_and_import_feed(self, feed_url, use_mirror, timeout = None):
    """Download and import a feed.
    @type feed_url: str
    @param use_mirror: False to use primary location; True to use mirror.
    @type use_mirror: bool
    @param timeout: callback to invoke when the download actually starts
    @rtype: L{zeroinstall.support.tasks.Blocker}"""
    if use_mirror:
        url = self.get_feed_mirror(feed_url)
        if url is None: return None
        logger.info(_("Trying mirror server for feed %s") % feed_url)
    else:
        url = feed_url
    if self.config.handler.dry_run:
        print(_("[dry-run] downloading feed {url}").format(url = url))
    dl = self.download_url(url, hint = feed_url, timeout = timeout)
    stream = dl.tempfile
    @tasks.named_async("fetch_feed " + url)
    def fetch_feed():
        try:
            yield dl.downloaded
            tasks.check(dl.downloaded)
            pending = PendingFeed(feed_url, stream)
            if use_mirror:
                # If we got the feed from a mirror, get the key from there too
                key_mirror = self.config.mirror + '/keys/'
            else:
                key_mirror = None
            keys_downloaded = tasks.Task(pending.download_keys(self, feed_hint = feed_url, key_mirror = key_mirror), _("download keys for %s") % feed_url)
            yield keys_downloaded.finished
            tasks.check(keys_downloaded.finished)
            dry_run = self.handler.dry_run
            if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run):
                # Not trusted yet: ask the trust manager (may prompt the user),
                # then retry the import once.
                blocker = self.config.trust_mgr.confirm_keys(pending)
                if blocker:
                    yield blocker
                    tasks.check(blocker)
                if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run):
                    raise NoTrustedKeys(_("No signing keys trusted; not importing"))
        finally:
            stream.close()
    task = fetch_feed()
    # Expose the download on the task so callers can access its timeout/abort.
    task.dl = dl
    return task
def fetch_key_info(self, fingerprint):
    """Return a (possibly cached) KeyInfoFetcher for this key.
    @type fingerprint: str
    @rtype: L{KeyInfoFetcher}"""
    cached = self.key_info.get(fingerprint)
    if cached is not None:
        return cached
    if self.config.handler.dry_run:
        print(_("[dry-run] asking {url} about key {key}").format(
            url = self.config.key_info_server,
            key = fingerprint))
    fetcher = KeyInfoFetcher(self,
        self.config.key_info_server, fingerprint)
    self.key_info[fingerprint] = fetcher
    return fetcher
# (force is deprecated and ignored)
def download_impl(self, impl, retrieval_method, stores, force = False):
"""Download an implementation.
@param impl: the selected implementation
@type impl: L{model.ZeroInstallImplementation}
@param retrieval_method: a way of getting the implementation (e.g. an Archive or a Recipe)
@type retrieval_method: L{model.RetrievalMethod}
@param stores: where to store the downloaded implementation
@type stores: L{zerostore.Stores}
@type force: bool
@rtype: L{tasks.Blocker}"""
assert impl
assert retrieval_method
# Native distribution packages are installed via the package manager,
# not downloaded into a zerostore.
if isinstance(retrieval_method, DistributionSource):
return retrieval_method.install(self.handler)
from zeroinstall.zerostore import manifest, parse_algorithm_digest_pair
# Pick the highest-rated digest algorithm we know how to verify; that
# digest is what the unpacked implementation must hash to.
best = None
for digest in impl.digests:
alg_name, digest_value = parse_algorithm_digest_pair(digest)
alg = manifest.algorithms.get(alg_name, None)
if alg and (best is None or best.rating < alg.rating):
best = alg
required_digest = digest
if best is None:
# Distinguish "no digests given at all" from "only unknown algorithms".
if not impl.digests:
raise SafeException(_("No <manifest-digest> given for '%(implementation)s' version %(version)s") %
{'implementation': impl.feed.get_name(), 'version': impl.get_version()})
raise SafeException(_("Unknown digest algorithms '%(algorithms)s' for '%(implementation)s' version %(version)s") %
{'algorithms': impl.digests, 'implementation': impl.feed.get_name(), 'version': impl.get_version()})
@tasks.async
def download_impl(method):
# Retry loop: on the first download failure, switch to the
# implementation mirror (if one is configured) and try once more.
original_exception = None
while True:
if not isinstance(method, Recipe):
# turn an individual method into a single-step Recipe
step = method
method = Recipe()
method.steps.append(step)
try:
blocker = self.cook(required_digest, method, stores,
impl_hint = impl,
dry_run = self.handler.dry_run,
may_use_mirror = original_exception is None)
yield blocker
tasks.check(blocker)
except download.DownloadError as ex:
if original_exception:
# The mirror failed too; report the original error instead.
logger.info("Error from mirror: %s", ex)
raise original_exception
else:
original_exception = ex
mirror_url = self._get_impl_mirror(impl)
if mirror_url is not None:
logger.info("%s: trying implementation mirror at %s", ex, mirror_url)
# The mirror serves implementations with this hard-coded archive type.
method = model.DownloadSource(impl, mirror_url,
None, None, type = 'application/x-bzip-compressed-tar')
continue # Retry
raise
except SafeException as ex:
# Wrap the error with the feed URL and version for context.
raise SafeException("Error fetching {url} {version}: {ex}".format(
url = impl.feed.url,
version = impl.get_version(),
ex = ex))
break
self.handler.impl_added_to_store(impl)
return download_impl(retrieval_method)
def _add_to_external_store(self, required_digest, steps, streams):
    """Hand the downloaded archives to the external store helper.

    The helper is invoked as:
      <external_store> add <digest> <path1> <extract1> <type1> <path2> ...
    i.e. one (archive path, extract dir, MIME type) triple per step.

    @param required_digest: digest the resulting store entry must match
    @type required_digest: str
    @param steps: the recipe steps describing each archive
    @param streams: the downloaded archive files, one per step
    @raise SafeException: if the helper exits with a non-zero status"""
    from zeroinstall.zerostore.unpack import type_from_url
    # Materialize as real lists: under Python 3 map() returns one-shot
    # iterators with no len(), which would break both the interleaving
    # below and the clean-up loop at the end.
    paths = [stream.name for stream in streams]
    extracts = [step.extract or "" for step in steps]
    types = [step.type or type_from_url(step.url) for step in steps]
    # Interleave the three lists: path, extract, type, path, extract, ...
    args = [None] * (len(paths) + len(extracts) + len(types))
    args[::3] = paths
    args[1::3] = extracts
    args[2::3] = types
    # Close file handles so the external process can access the files.
    for stream in streams:
        stream.close()
    # Delegate extracting the archives to the external tool.
    import subprocess
    retval = subprocess.call([self.external_store, "add", required_digest] + args)
    # Delete the temp files regardless of the helper's outcome.
    for path in paths:
        os.remove(path)
    if retval != 0:
        raise SafeException(_("Extracting with external store failed"))
def _download_local_file(self, download_source, impl_hint):
    """Open a file referenced by a relative URL inside a local feed.

    Returns (None, stream): there is no download blocker because the
    file already exists on disk."""
    # A relative URL only makes sense within a feed stored on disk.
    if impl_hint is None or not impl_hint.feed.local_path:
        raise SafeException(_("Relative URL '{url}' in non-local feed '{feed}'").format(
            url = download_source.url,
            feed = impl_hint.feed))
    feed_dir = os.path.dirname(impl_hint.feed.local_path)
    target = os.path.join(feed_dir, download_source.url)
    try:
        actual_size = os.path.getsize(target)
        if actual_size != download_source.size:
            raise SafeException(_("Wrong size for {path}: feed says {expected}, but actually {actual} bytes").format(
                path = target,
                expected = download_source.size,
                actual = actual_size))
        return (None, open(target, 'rb'))
    except OSError as ex:
        raise SafeException(str(ex))	# (error already includes path)
# (force is deprecated and ignored)
def download_archive(self, download_source, force = False, impl_hint = None, may_use_mirror = False):
    """Fetch an archive. You should normally call L{download_impl}
    instead, since it handles other kinds of retrieval method too.
    It is the caller's responsibility to ensure that the returned stream is closed.
    If impl_hint is from a local feed and the url is relative, just opens the existing file for reading.
    @type download_source: L{model.DownloadSource}
    @type force: bool
    @type may_use_mirror: bool
    @rtype: (L{Blocker} | None, file)"""
    from zeroinstall.zerostore import unpack
    # Determine the archive's MIME type: explicit attribute first,
    # otherwise guess from the URL's file extension.
    mime_type = download_source.type or unpack.type_from_url(download_source.url)
    if not mime_type:
        raise SafeException(_("No 'type' attribute on archive, and I can't guess from the name (%s)") % download_source.url)
    # When unpacking locally, verify we can handle this archive type.
    if not self.external_store:
        unpack.check_type_ok(mime_type)
    # A URL without a scheme is a path relative to a local feed.
    if '://' not in download_source.url:
        return self._download_local_file(download_source, impl_hint)
    mirror = self._get_archive_mirror(download_source) if may_use_mirror else None
    if self.config.handler.dry_run:
        print(_("[dry-run] downloading archive {url}").format(url = download_source.url))
    dl = self.download_url(download_source.url, hint = impl_hint, mirror_url = mirror)
    if download_source.size is not None:
        dl.expected_size = download_source.size + (download_source.start_offset or 0)
    # (otherwise we don't know the sizes for mirrored archives)
    return (dl.downloaded, dl.tempfile)
def download_file(self, download_source, impl_hint=None):
    """Fetch a single file. You should normally call L{download_impl}
    instead, since it handles other kinds of retrieval method too.
    It is the caller's responsibility to ensure that the returned stream is closed.
    @type download_source: L{zeroinstall.injector.model.FileSource}
    @type impl_hint: L{zeroinstall.injector.model.ZeroInstallImplementation} | None
    @rtype: tuple"""
    url = download_source.url
    if self.config.handler.dry_run:
        print(_("[dry-run] downloading file {url}").format(url = url))
    # A URL without a scheme is a path relative to a local feed.
    if '://' not in url:
        return self._download_local_file(download_source, impl_hint)
    dl = self.download_url(url, hint = impl_hint)
    dl.expected_size = download_source.size
    return (dl.downloaded, dl.tempfile)
# (force is deprecated and ignored)
def download_icon(self, interface, force = False):
"""Download an icon for this interface and add it to the
icon cache. If the interface has no icon do nothing.
@type interface: L{zeroinstall.injector.model.Interface}
@type force: bool
@return: the task doing the import, or None
@rtype: L{tasks.Task}"""
logger.debug("download_icon %(interface)s", {'interface': interface})
modification_time = None
existing_icon = self.config.iface_cache.get_icon_path(interface)
if existing_icon:
# Send the cached copy's mtime (as an HTTP-date string) so the server
# can reply "unmodified" instead of resending the icon.
file_mtime = os.stat(existing_icon).st_mtime
from email.utils import formatdate
modification_time = formatdate(timeval = file_mtime, localtime = False, usegmt = True)
feed = self.config.iface_cache.get_feed(interface.uri)
if feed is None:
return None
# Find a suitable icon to download
for icon in feed.get_metadata(XMLNS_IFACE, 'icon'):
type = icon.getAttribute('type')
if type != 'image/png':
logger.debug(_('Skipping non-PNG icon'))
continue
source = icon.getAttribute('href')
if source:
# Found a usable PNG icon; stop searching.
break
logger.warning(_('Missing "href" attribute on <icon> in %s'), interface)
else:
# for/else: loop finished without break, i.e. no usable PNG icon.
logger.info(_('No PNG icons found in %s'), interface)
return
dl = self.download_url(source, hint = interface, modification_time = modification_time)
@tasks.async
def download_and_add_icon():
stream = dl.tempfile
try:
yield dl.downloaded
tasks.check(dl.downloaded)
# Server says our cached copy is still current; nothing to do.
if dl.unmodified: return
stream.seek(0)
import shutil, tempfile
icons_cache = basedir.save_cache_path(config_site, 'interface_icons')
# Write to a temp file first and rename into place, so a partially
# written icon is never visible under the final cache name.
tmp_file = tempfile.NamedTemporaryFile(dir = icons_cache, delete = False)
shutil.copyfileobj(stream, tmp_file)
tmp_file.close()
icon_file = os.path.join(icons_cache, escape(interface.uri))
portable_rename(tmp_file.name, icon_file)
finally:
stream.close()
return download_and_add_icon()
def download_impls(self, implementations, stores):
"""Download the given implementations, choosing a suitable retrieval method for each.
If any of the retrieval methods are DistributionSources and
need confirmation, handler.confirm is called to check that the
installation should proceed.
@type implementations: [L{zeroinstall.injector.model.ZeroInstallImplementation}]
@type stores: L{zeroinstall.zerostore.Stores}
@rtype: L{zeroinstall.support.tasks.Blocker}"""
if self.external_fetcher:
# An external fetcher handles everything itself; no blocker to return.
self._download_with_external_fetcher(implementations)
return None
unsafe_impls = []
to_download = []
for impl in implementations:
logger.debug(_("start_downloading_impls: for %(feed)s get %(implementation)s"), {'feed': impl.feed, 'implementation': impl})
source = self.get_best_source(impl)
if not source:
raise SafeException(_("Implementation %(implementation_id)s of interface %(interface)s"
" cannot be downloaded (no download locations given in "
"interface!)") % {'implementation_id': impl.id, 'interface': impl.feed.get_name()})
to_download.append((impl, source))
# Native package installs run with extra privileges; collect them so
# the user can confirm all of them in a single prompt below.
if isinstance(source, DistributionSource) and source.needs_confirmation:
unsafe_impls.append(source.package_id)
@tasks.async
def download_impls():
if unsafe_impls:
confirm = self.handler.confirm_install(_('The following components need to be installed using native packages. '
'These come from your distribution, and should therefore be trustworthy, but they also '
'run with extra privileges. In particular, installing them may run extra services on your '
'computer or affect other users. You may be asked to enter a password to confirm. The '
'packages are:\n\n') + ('\n'.join('- ' + x for x in unsafe_impls)))
yield confirm
tasks.check(confirm)
# Start all downloads in parallel; each returns a Blocker.
blockers = []
for impl, source in to_download:
blockers.append(self.download_impl(impl, source, stores))
# Record the first error log the rest
error = []
def dl_error(ex, tb = None):
if error:
self.handler.report_error(ex)
else:
error.append((ex, tb))
# Wait until every download has either completed or failed.
while blockers:
yield blockers
tasks.check(blockers, dl_error)
blockers = [b for b in blockers if not b.happened]
if error:
from zeroinstall import support
# Re-raise the first recorded failure with its original traceback.
support.raise_with_traceback(*error[0])
if not to_download:
return None
return download_impls()
def _download_with_external_fetcher(self, implementations):
    """Delegate fetching to the external fetcher process, feeding it the
    implementation list as XML on stdin.

    @type implementations: [L{zeroinstall.injector.model.ZeroInstallImplementation}]
    @raise SafeException: if the fetcher exits with a non-zero status"""
    # Serialize implementation list to XML
    from xml.dom import minidom, XMLNS_NAMESPACE
    from zeroinstall.injector.namespaces import XMLNS_IFACE
    from zeroinstall.injector.qdom import Prefixes
    doc = minidom.getDOMImplementation().createDocument(XMLNS_IFACE, "interface", None)
    root = doc.documentElement
    root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', XMLNS_IFACE)
    for impl in implementations:
        root.appendChild(impl._toxml(doc, Prefixes(XMLNS_IFACE)))
    # Pipe XML into external process
    import subprocess
    process = subprocess.Popen(self.external_fetcher, stdin=subprocess.PIPE)
    payload = doc.toxml() + "\n"
    # The pipe is in binary mode, so under Python 3 we must send bytes;
    # passing the str payload directly would raise a TypeError there.
    if not isinstance(payload, bytes):
        payload = payload.encode('utf-8')
    process.communicate(payload)
    if process.returncode != 0:
        raise SafeException(_("Download with external fetcher failed"))
def get_best_source(self, impl):
    """Return the preferred download source for this implementation,
    or None when it offers no sources at all.
    @type impl: L{zeroinstall.injector.model.ZeroInstallImplementation}
    @rtype: L{model.RetrievalMethod}"""
    sources = impl.download_sources
    return sources[0] if sources else None
def download_url(self, url, hint = None, modification_time = None, expected_size = None, mirror_url = None, timeout = None, auto_delete = True):
"""The most low-level method here; just download a raw URL.
It is the caller's responsibility to ensure that dl.stream is closed.
@param url: the location to download from
@type url: str
@param hint: user-defined data to store on the Download (e.g. used by the GUI)
@param modification_time: don't download unless newer than this
@param mirror_url: an alternative URL to try if this one fails
@type mirror_url: str
@param timeout: create a blocker which triggers if a download hangs for this long
@type timeout: float | str | None
@rtype: L{download.Download}
@since: 1.5"""
# Only remote schemes are handled here; local paths go through the
# higher-level download_* methods instead.
if not (url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:')):
raise SafeException(_("Unknown scheme in download URL '%s'") % url)
# With an external store, the raw archive file must survive so the
# helper process can read it later.
if self.external_store: auto_delete = False
dl = download.Download(url, hint = hint, modification_time = modification_time, expected_size = expected_size, auto_delete = auto_delete)
dl.mirror = mirror_url
self.handler.monitor_download(dl)
# NOTE(review): only an int timeout gets a timeout Blocker attached
# here, although the docstring allows float/str - confirm whether the
# scheduler handles non-int timeouts itself.
if isinstance(timeout, int):
dl.timeout = tasks.Blocker('Download timeout')
dl.downloaded = self.scheduler.download(dl, timeout = timeout)
return dl
class StepRunner(object):
    """The base class of all step runners.
    @since: 1.10"""

    def __init__(self, stepdata, impl_hint, may_use_mirror = True):
        """@type stepdata: L{zeroinstall.injector.model.RetrievalMethod}
        @type may_use_mirror: bool"""
        self.may_use_mirror = may_use_mirror
        self.impl_hint = impl_hint
        self.stepdata = stepdata

    def prepare(self, fetcher, blockers):
        """Start any asynchronous work this step needs (no-op by default).
        @type fetcher: L{Fetcher}
        @type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
        pass

    @classmethod
    def class_for(cls, model):
        """Return the StepRunner subclass that handles this step type.
        @type model: L{zeroinstall.injector.model.RetrievalMethod}"""
        wanted = type(model)
        for candidate in cls.__subclasses__():
            if candidate.model_type == wanted:
                return candidate
        raise Exception(_("Unknown download type for '%s'") % model)

    def close(self):
        """Release any resources (called on success or failure)."""
        pass
class RenameStepRunner(StepRunner):
    """A step runner for the <rename> step.
    @since: 1.10"""

    model_type = model.RenameStep

    def apply(self, basedir):
        """Rename stepdata.source to stepdata.dest, with both paths
        resolved safely inside basedir.
        @type basedir: str"""
        src = native_path_within_base(basedir, self.stepdata.source)
        dst = native_path_within_base(basedir, self.stepdata.dest)
        _ensure_dir_exists(os.path.dirname(dst))
        try:
            os.rename(src, dst)
        except OSError:
            if os.path.exists(src):
                raise
            # Python reports the destination path by default here, which
            # would be misleading; name the missing source instead.
            raise SafeException("<rename> source '{source}' does not exist".format(
                source = self.stepdata.source))
class RemoveStepRunner(StepRunner):
    """A step runner for the <remove> step."""

    model_type = model.RemoveStep

    def apply(self, basedir):
        """Recursively delete stepdata.path (resolved inside basedir).
        @type basedir: str"""
        doomed = native_path_within_base(basedir, self.stepdata.path)
        support.ro_rmtree(doomed)
class DownloadStepRunner(StepRunner):
"""A step runner for the <archive> step.
@since: 1.10"""
model_type = model.DownloadSource
def prepare(self, fetcher, blockers):
"""Start downloading the archive; the resulting blocker (if any) is
appended to blockers so the caller can wait on it.
@type fetcher: L{Fetcher}
@type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
self.blocker, self.stream = fetcher.download_archive(self.stepdata, impl_hint = self.impl_hint, may_use_mirror = self.may_use_mirror)
assert self.stream
if self.blocker:
blockers.append(self.blocker)
def apply(self, basedir):
"""Unpack the downloaded archive into basedir (or into the step's
'dest' subdirectory of it). Only valid once the download blocker
from prepare() has triggered.
@type basedir: str"""
from zeroinstall.zerostore import unpack
assert self.blocker is None or self.blocker.happened
if self.stepdata.dest is not None:
basedir = native_path_within_base(basedir, self.stepdata.dest)
_ensure_dir_exists(basedir)
unpack.unpack_archive_over(self.stepdata.url, self.stream, basedir,
extract = self.stepdata.extract,
type=self.stepdata.type,
start_offset = self.stepdata.start_offset or 0)
def close(self):
"""Release the downloaded archive stream."""
self.stream.close()
class FileStepRunner(StepRunner):
"""A step runner for the <file> step."""
model_type = model.FileSource
def prepare(self, fetcher, blockers):
"""Start downloading the file; the resulting blocker (if any) is
appended to blockers so the caller can wait on it.
@type fetcher: L{Fetcher}
@type blockers: [L{zeroinstall.support.tasks.Blocker}]"""
self.blocker, self.stream = fetcher.download_file(self.stepdata,
impl_hint = self.impl_hint)
assert self.stream
if self.blocker:
blockers.append(self.blocker)
def apply(self, basedir):
"""Copy the downloaded file to the step's 'dest' path inside basedir.
@type basedir: str"""
import shutil
assert self.blocker is None or self.blocker.happened
dest = native_path_within_base(basedir, self.stepdata.dest)
_ensure_dir_exists(os.path.dirname(dest))
self.stream.seek(0)
with open(dest, 'wb') as output:
shutil.copyfileobj(self.stream, output)
# Zero the file's timestamps - presumably so the unpacked tree hashes
# to a reproducible manifest digest; TODO confirm.
os.utime(dest, (0, 0))
def close(self):
"""Release the downloaded file stream."""
self.stream.close()
def native_path_within_base(base, crossplatform_path):
    """Takes a cross-platform relative path (i.e using forward slashes, even on windows)
    and returns the absolute, platform-native version of the path.
    If the path does not resolve to a location within `base`, a SafeError is raised.
    @type base: str
    @type crossplatform_path: str
    @rtype: str
    @since: 1.10"""
    assert os.path.isabs(base)
    # Reject absolute paths outright; everything else is resolved (links
    # and '..' included) and then checked against the resolved base.
    if crossplatform_path.startswith("/"):
        raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
    parts = crossplatform_path.split("/")
    resolved = os.path.realpath(os.path.join(base, os.path.join(*parts)))
    real_base = os.path.realpath(base)
    if not resolved.startswith(real_base + os.path.sep):
        raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
    return resolved
def _ensure_dir_exists(dest):
    """Create directory dest (including missing parents) if needed.
    @type dest: str"""
    try:
        os.makedirs(dest)
    except OSError:
        # Either the directory already exists (possibly created by a
        # concurrent process - the original isdir-then-makedirs check
        # raced here), or creation genuinely failed; only re-raise in
        # the latter case.
        if not os.path.isdir(dest):
            raise
|
linuxmidhun/0install
|
zeroinstall/injector/fetch.py
|
Python
|
lgpl-2.1
| 31,332
|
[
"VisIt"
] |
a4a6dc050d92cc8630a84a9bd782d3a23008c364864cb321f3f4d66d9af0860d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 15:59:28 2015
@author: dominic
"""
###############################################################################
# Blast Functions #############################################################
###############################################################################
def NucleotideBlast(query, subject, output):
    """Blast the nucleotide query fasta file against a subject fasta,
    appending XML-format results to the output file."""
    from os import system
    # NOTE(review): the command is built by string concatenation and run
    # through the shell, so paths with spaces or shell metacharacters
    # will break (or execute); consider subprocess.run([...]) instead.
    system("blastn -query " + query + " -subject " + subject +
           " -outfmt 5 >> " + output)  # -outfmt 5 specifies XML output
    return
def BestHitblastn(query, subject, output):
    """Run blastn for query vs subject keeping only the best hit, writing
    tsv results to the output file.

    I prefer the syntax of this over NCBI's python module.
    Returns 0 unconditionally; the blastn exit status is not checked."""
    from os import system
    # NOTE(review): command built by string concatenation and run via the
    # shell - unsafe for paths containing spaces or shell metacharacters.
    system("blastn -query " + str(query) + " -subject " + str(subject) +
           " -outfmt 6 -max_target_seqs 1 > " + str(output))  # -outfmt 6 specifies tsv output
    return 0
def AllGenomeBlast(query, GenomeDir, output):
    """Blast the query against every subject genome file in GenomeDir,
    appending all results to the same output file via NucleotideBlast."""
    from os import listdir
    for genome in listdir(GenomeDir):
        NucleotideBlast(query, GenomeDir + '/' + genome, output)
        # Single-argument print() works under both Python 2 and 3
        # (the original 'print ...' statement is Python-2 only).
        print("Blast " + query + " vs " + genome + " successful")
    return 0
###############################################################################
# Accessing output functions ##################################################
###############################################################################
def GetBlastRef(inblast):
    """Read a blast tabular (tsv) result file into a numpy array of rows.

    Uses a context manager so the file handle is always closed (the
    original left it open)."""
    from numpy import array
    from csv import reader
    with open(inblast) as handle:
        rows = [line for line in reader(handle, delimiter='\t')]
    return array(rows)
def GetBlast(blastfile):
from Bio.Blast import NCBIXML as Nxml
"""In XML format sbjct details (name) are contained in alignment objects
hit details (sequence, %ID, hitlength etc) are contained in HSP objects
HSP objects are a sub object of alignment objects (alignment[0].hsps)
If there are multiple hits for a single sbjct (contig) then multiple HSP
objects will be stored in the same alignment object.
It is therefore necessary to extract all HSP objects into a list which
is indexed by another list containing the sbjct names. This also deals with
no hit objects recorded in blast XML format.
"""
hitindex = []  # subject (contig) name for each HSP, parallel to hitdata
hitdata = []  # the HSP objects themselves
for ii in Nxml.parse(open(blastfile)): # Blast record iterator
if ii.alignments != []: # Empty lists show no hits and are ignored - This is the sbjct detail level
# NOTE(review): only the first alignment (alignments[0]) of each record
# is examined; hits in any further alignments are silently skipped -
# confirm this is intentional.
for jj in ii.alignments[0].hsps: # Goes a level deeper - This is the hit detail level
hitindex.append(ii.alignments[0].hit_def)
hitdata.append(jj)
return hitindex, hitdata
###############################################################################
# Blast hit extraction and SNP identification #################################
##############################################################################
#
def GetInContigPosition(sbjct_start, sbjct_end, contigcounter):
    """Translate an offset within a blast hit into a contig coordinate:
    positions advance when the hit runs forwards (start < end) and walk
    backwards otherwise. Returns the position as a string."""
    start = int(sbjct_start)
    offset = int(contigcounter)
    position = start + offset if sbjct_start < sbjct_end else start - offset
    return str(position)
def ParaOrOrtho(blastfile, singlehitdata, referenceGenome, tmpseqfile, tmpblastfile):
"""Reciprocal-best-hit test: blast the hit sequence back against the
reference genome and return True (ortholog) when the best hit's subject
id equals the original blast file's name, False (paralog) otherwise."""
import os
# Write the hit's subject sequence to a temp fasta for the reciprocal blast.
tmpseqfi = open(tmpseqfile, 'w')
tmpseqfi.write('>tmpfasta\n' + singlehitdata.sbjct)
tmpseqfi.close()
BestHitblastn(tmpseqfile, referenceGenome, tmpblastfile)
# NOTE(review): temp files are removed via the shell; os.remove would be
# safer for paths containing spaces or metacharacters.
os.system('rm ' + tmpseqfile)
bestblasthit = GetBlastRef(tmpblastfile)
os.system('rm ' + tmpblastfile)
# Column 1 of tabular blast output holds the best hit's subject id.
return os.path.basename(blastfile) == bestblasthit[0][1]
def snpsNseqs(singlehitdata, singlehitindex, alignment_end, blastfile, referenceGenome, tmpseqfile, tmpblastfile):
"""In order to produce good sequences for alignments the sequences must be gapped
to include gaps at the beginning of the sequence.
Returns (gapped sequence, SNP list) for an orthologous hit, or the
sentinel pair ('Paralog', 'Paralog') when the hit fails the
reciprocal-best-hit test."""
if ParaOrOrtho(blastfile, singlehitdata, referenceGenome, tmpseqfile, tmpblastfile):
cntgbpcounter = 1  # current offset within the subject contig
algnbpcounter = singlehitdata.query_start  # current column within the query alignment
SNPList = []
OutSeq = []
for ii in '-'*(singlehitdata.query_start - 1): # How many gaps to add to the start of the sequence
OutSeq.append(ii)
# Walk query and subject in lockstep, classifying each column as
# match, insertion, deletion or substitution.
for ii,jj in zip(singlehitdata.query, singlehitdata.sbjct):
if ii == jj: # Nothing interesting
OutSeq.append(jj)
algnbpcounter += 1
cntgbpcounter += 1
elif ii == '-': # If gap in query (insertion)
SNPList.append([singlehitindex, ii, jj, algnbpcounter, GetInContigPosition(singlehitdata.sbjct_start, singlehitdata.sbjct_end, cntgbpcounter) ,singlehitdata.query_start, singlehitdata.query_end, singlehitdata.sbjct_start, singlehitdata.sbjct_end]) #Position of insertion in alignment(doesn't appear in alignment) and contig(for coverage lookup)
cntgbpcounter += 1 # Does not add to alignment counter as doesn't appear in alignment
elif jj == '-': # if gap in sbjct (deletion)
SNPList.append([singlehitindex, ii, jj, algnbpcounter, GetInContigPosition(singlehitdata.sbjct_start, singlehitdata.sbjct_end, cntgbpcounter), singlehitdata.query_start, singlehitdata.query_end, singlehitdata.sbjct_start, singlehitdata.sbjct_end])
OutSeq.append(jj) # gap added to alignment
algnbpcounter += 1 # Add position of gap in alignment, contig not added because gap is not present in contig
else: # Last possibility, simple SNP
SNPList.append([singlehitindex, ii, jj, algnbpcounter, GetInContigPosition(singlehitdata.sbjct_start, singlehitdata.sbjct_end, cntgbpcounter), singlehitdata.query_start, singlehitdata.query_end, singlehitdata.sbjct_start, singlehitdata.sbjct_end])
OutSeq.append(jj)
algnbpcounter += 1
cntgbpcounter += 1
if SNPList == []: # Adds entry to contig if no SNPs are found, this allows it to still be used to index with later
SNPList.append([singlehitindex,0,0,0,0,singlehitdata.query_start, singlehitdata.query_end,singlehitdata.sbjct_start,singlehitdata.sbjct_end])
# Pad the sequence with trailing gaps up to the alignment window's end.
for ii in '-' * (int(alignment_end) - singlehitdata.query_end):
OutSeq.append(ii)
return OutSeq, SNPList
# Hit failed the ortholog test; callers check for this sentinel.
return 'Paralog', 'Paralog'
def snpsNseqs4wholeblast(blastfile, referenceGenome, tmpseqfile, tmpblastfile):
    """Run snpsNseqs for every hit in a blast XML file, skipping any hits
    classified as paralogs. Returns (numpy array of gapped sequences,
    list of per-hit SNP lists)."""
    from numpy import array
    import os
    hitindex, hitdata = GetBlast(blastfile)
    fullSeq = []
    fullSNP = []
    hitcounter = 1
    for hit, name in zip(hitdata, hitindex):
        # The alignment end coordinate is encoded as the last '_'-separated
        # field of the blast file's name.
        tmpSeq, tmpSNP = snpsNseqs(hit, name, os.path.basename(blastfile).split('_')[-1],
                                   blastfile, referenceGenome, tmpseqfile, tmpblastfile)
        # Single-argument print() is valid under both Python 2 and 3
        # (the original used Python-2-only print statements).
        if tmpSeq != 'Paralog':
            print("hit " + str(hitcounter) + ": Ortholog")
            fullSeq.append(tmpSeq)
            fullSNP.append(tmpSNP)
        else:
            print("hit " + str(hitcounter) + ": Paralog")
        hitcounter += 1
    return array(fullSeq), fullSNP
def writefasta(seqarray, seqindex, outfile):
    """Write each sequence (an iterable of single characters) to outfile
    in fasta format, taking the header from the first SNP record of the
    corresponding seqindex entry.

    Uses a context manager so the file is closed even if a write fails
    (the original could leak the handle on error)."""
    with open(outfile, 'w') as handle:
        for seq, snps in zip(seqarray, seqindex):
            handle.write('>' + snps[0][0] + '\n' + ''.join(seq) + '\n')
def SNPfilter(SNPlist, seqarray, outfile):
"""Filter SNPs, keeping those that (a) are not ambiguous 'N' calls,
(b) lie at an alignment column covered (non-gap) by every genome, and
(c) are consistent across all contigs of the same genome. Accepted
SNPs are written as csv rows to outfile and returned."""
from numpy import array, nonzero, unique
from csv import writer
# Genome id for each contig (contig names look like '<genome>_..._<n>').
GenomeIndex = array([ii[0][0].split('_')[0] for ii in SNPlist])
goodSNP = []
for SNP in SNPlist:
for jj in SNP:
if jj[1] != 'N' and jj[2] != 'N':
# Genomes whose sequences actually cover this alignment column (no gap).
lentesttmp = array([(SNPlist[i][0][0].split('_')[0], SNPlist[i][0][0]) for i in nonzero(seqarray[:,int(jj[3]) - 1] != '-')[0]])
if len(unique(lentesttmp[:,0])) == len(unique(GenomeIndex)):
# All contigs belonging to this SNP's genome must agree on the base.
tmp = seqarray[nonzero(GenomeIndex == jj[0].split('_')[0])][:,int(jj[3]) - 1]
tmp = tmp[tmp!='-']
if len(unique(tmp)) == 1:
goodSNP.append(jj)
f = open(outfile, 'w')
outSNP = []
for ii in goodSNP:
if ii[1] != 0: # skip the placeholder rows added for SNP-free contigs
writer(f).writerow(ii)
outSNP.append(ii)
f.close()
return outSNP
def ExtractHitMain(blastfile, reference, outdir, tmpseqfile, tmpblastfile):
    """Parse one blast result file: write the gapped alignment fasta,
    filter and write the SNP csv, and return the filtered SNP list.

    NOTE(review): the 'reference' parameter is unused - the reference
    path is hard-coded as outdir + '/AllGenes.fasta'. The parameter is
    kept for backward compatibility; confirm which path callers intend."""
    import os
    seqarray, seqindex = snpsNseqs4wholeblast(blastfile, outdir + '/AllGenes.fasta', tmpseqfile, tmpblastfile)
    # Single-argument print() works under both Python 2 and 3.
    print('blast file ' + os.path.basename(blastfile) + ' parsed')
    writefasta(seqarray, seqindex, outdir + '/Alignments/' + os.path.basename(blastfile) + '.fasta')
    print('fasta ' + os.path.basename(blastfile) + '.fasta written')
    goodSNPs = SNPfilter(seqindex, seqarray, outdir + '/SNPs/' + os.path.basename(blastfile) + '.csv')
    print(str(len(goodSNPs)) + ' SNPs identified')
    return goodSNPs
###############################################################################
# Old Versions ################################################################
###############################################################################
##
#
#
#def GenMultifasta(blastfile, outdir):
# import os
# os.system('mkdir ' + outdir + '/Alignments')
# os.system('mkdir ' + outdir + '/SNPs')
# import os
# index,blast = GetBlast(blastfile)
# strt,end = os.path.basename(blastfile).split('_')[-2:]
# if len(index) < 10:
# for hitindex,contighits in zip(index,blast):
# for hit in contighits:
# aligncounter = 1
# contigcounter = 1
# outseq = ''
# for querybase,sbjctbase in zip(hit.query,hit.sbjct):
#
#
# if querybase != sbjctbase and querybase != '-' and sbjctbase != '-': # If there is a difference which is not an indel
# #Write Out SNPs genomecontig, querybase, sbjctbase, in contig base position
# open(outdir + '/SNPs/' + os.path.basename(blastfile), 'a').write(hitindex[0].hit_def + ',' + querybase + ',' + sbjctbase +',' + str(aligncounter + hit.query_start-1)+','+ GetInContigPosition(hit.sbjct_start, hit.sbjct_end, contigcounter)+'\n')
# outseq += sbjctbase
# aligncounter += 1
# contigcounter += 1
#
#
#
#
# elif querybase != sbjctbase and sbjctbase == '-': # If there is a difference which is a deletion
# open(outdir + '/SNPs/' + os.path.basename(blastfile), 'a').write(hitindex[0].hit_def + ',' + querybase + ',' + sbjctbase +',' + str(aligncounter + hit.query_start-1)+','+GetInContigPosition(hit.sbjct_start, hit.sbjct_end, contigcounter)+'\n')
# outseq += sbjctbase
# aligncounter += 1
#
#
#
# elif querybase != sbjctbase and querybase == '-': # If there is a difference which is an insertion
# open(outdir + '/SNPs/' + os.path.basename(blastfile), 'a').write(hitindex[0].hit_def + ',' + querybase + ',' + sbjctbase +',' + str(aligncounter + hit.query_start-1)+','+GetInContigPosition(hit.sbjct_start, hit.sbjct_end, contigcounter)+'\n')
# contigcounter += 1
#
#
# elif querybase == sbjctbase:
# outseq += sbjctbase
# aligncounter += 1#
# contigcounter += 1
#
# else:
# print 'HUH?'
#
# open(outdir + '/Alignments/' + os.path.basename(blastfile), 'a').write('>' + hitindex[0].hit_def + '\n' + '-'*(hit.query_start-1) + outseq + ((int(end)-int(strt)+1)-hit.query_end)*'-' +'\n')#
#
#
###############################################################################
# Utility Functions ###########################################################
###############################################################################
def GetFasta(infasta):
    """Parse a fasta file with Biopython. Returns the list of SeqRecords,
    or the single record itself when the file holds exactly one."""
    from Bio import SeqIO
    records = [record for record in SeqIO.parse(open(infasta), 'fasta')]
    if len(records) == 1:
        return records[0]
    return records
def traverse(o, tree_types=(list, tuple)):
    """Depth-first flatten of arbitrarily nested lists/tuples, yielding
    the leaves in order. A non-container argument is yielded as-is."""
    if not isinstance(o, tree_types):
        yield o
        return
    for item in o:
        for leaf in traverse(item, tree_types):
            yield leaf
def SetFastaHeader(infasta, outdir):
# NOTE(review): this imports GetFasta from a 'Utility' module although a
# function of the same name is defined in this file - confirm they match.
from Utility import GetFasta
from Bio import SeqIO
import re
import os
"""Changes all headers in fasta to include
the file name"""
counter = 1 #Contig Counter used for ID
fastdat = GetFasta(infasta)
for sequence in fastdat:
# os.path.basename used to take filename from user input
# Genome Name taken from file name using re
# re returns 'genomename.' indexing then used to remove the last character (.)
sequence.id = re.match(r'.+\.', os.path.basename(infasta)).group()[:-1] + '_' + 'contig_' + str(counter)
sequence.description = sequence.id
counter += 1
# Rewrite the (renamed) records into a same-named file under outdir.
outfile = open(outdir + '/' + os.path.basename(infasta), 'w')
SeqIO.write(fastdat, outfile, 'fasta')
outfile.close()
###############################################################################
# Split fasta #################################################################
###############################################################################
def splitgenome(Genome, window, outdir):
"""Split every contig of a genome fasta into chunks of `window` bases,
writing each chunk to its own fasta file in outdir and returning the
list of chunk records. The final chunk absorbs the remainder so it is
never shorter than half a window.
NOTE(review): file paths are built as outdir + tmp.id with no separator,
so outdir must end with '/' - confirm callers guarantee this."""
from Bio import SeqIO
outsplit = []
for contig in GetFasta(Genome):
counter = 0
while len(contig) - counter > 1.5*window: # Prevents the last window being smaller than half a window
tmp = contig[counter:counter+window]
# The chunk id records its 1-based start and end within the contig.
tmp.id = contig.id + '_' + str(counter+1) + '_' + str(counter + window)
tmp.description = tmp.id
outfile = open(outdir + tmp.id, 'w')
SeqIO.write(tmp, outfile, 'fasta')
outfile.close()
counter += window
outsplit.append(tmp)
# Whatever remains (between 0.5 and 1.5 windows) becomes the final chunk.
tmp = contig[counter:len(contig)]
tmp.id = contig.id + '_' + str(counter+1) + '_' + str(len(contig))
tmp.description = tmp.id
outfile = open(outdir + tmp.id, 'w') # Change dir to user input
SeqIO.write(tmp, outfile, 'fasta')
outfile.close()
outsplit.append(tmp)
return outsplit
###############################################################################
# Coverage Functions ##########################################################
###############################################################################
def extractcov(Genome, Contig, Pos, tmpfile, extrapos=10):
    """Grep the per-base coverage rows for positions Pos +/- extrapos/2
    on the given contig out of a (large) tsv coverage file - much faster
    than iterating over the whole coverage map in Python.

    Returns the matching rows as lists of tab-separated fields."""
    from csv import reader
    import os
    # NOTE(review): the grep command is built by string concatenation and
    # run via the shell; contig names or file paths containing shell
    # metacharacters would break it.
    for offset in range(-int(int(extrapos) / 2), int(int(extrapos) / 2 + 1)):
        os.system('cat ' + Genome + ' | grep -P "contig_' + Contig + '\t' + str(int(Pos) + offset) + '\t" >> ' + tmpfile)
    CntgCov = [line for line in reader(open(tmpfile), delimiter='\t')]
    # Single-argument print() works under both Python 2 and 3.
    print(len(CntgCov))
    os.system('rm ' + tmpfile)
    return CntgCov
def GetCov(SNPdat, CovDir, outfile, tmpfile, extrapos=10, mincov=1):
    """For every SNP in the csv file SNPdat, look up the sequencing
    coverage around its contig position and keep the rows that meet
    mincov. Accepted rows (with flanking coverage columns appended) are
    written to outfile and returned."""
    import re
    import os
    from csv import writer, reader
    CovList = []
    for SNP in [line for line in reader(open(SNPdat))]:
        print(SNP)
        # Find the coverage file whose name contains this SNP's genome id.
        genome = [x for x in os.listdir(CovDir) if re.search(SNP[0].split('_')[0], x)]
        fulltmp = extractcov(CovDir + '/' + genome[0], SNP[0].split('_')[-1], SNP[4], tmpfile, extrapos)
        extrapostmp = []
        # BUG FIX: reset per SNP. Previously this name leaked from the
        # prior loop iteration (only a NameError on the very first SNP was
        # caught), so a later low-coverage SNP silently re-used - and
        # re-extended - the previous SNP's row.
        actualpostmp = None
        for row in fulltmp:
            if int(row[1]) != int(SNP[4]):
                # Flanking position: keep its two coverage columns as context.
                extrapostmp.extend([row[4], row[5]])
            else:
                if int(row[3]) >= mincov:
                    actualpostmp = row
                else:
                    break
        if actualpostmp is not None:
            actualpostmp.extend(extrapostmp)
            CovList.append(actualpostmp)
        else:
            print("SNP coverage too low or zero")
    output = open(outfile, 'w')
    for line in CovList:
        if line != []:
            writer(output).writerow(line)
    output.close()
    return CovList
###############################################################################
# Main Function ###############################################################
###############################################################################
def SNPfinder(Gene, GenomeDir, FullReference, OutDir, tmpseqfile, tmpblastfile, CovDir, extrapos, mincov=1):
    """Full pipeline for one gene: blast it against every genome, extract
    hits and SNPs, then pull the sequencing coverage for each SNP.
    Results are written under OutDir; the intermediate blast output file
    is deleted afterwards."""
    import os
    import time
    # Single-argument print() works under both Python 2 and 3.
    print("Beginning SNPfinder for gene: " + Gene)
    print("Blastn " + Gene + " vs subject genomes")
    strttime = time.time()
    AllGenomeBlast(Gene, GenomeDir, OutDir + '/' + os.path.basename(Gene))
    print("Blastn finished, time taken: " + str(time.time() - strttime))
    print("Starting hit extraction and SNP identification")
    midtime = time.time()
    ExtractHitMain(OutDir + '/' + os.path.basename(Gene), FullReference, OutDir, tmpseqfile, tmpblastfile)
    print("Hits extracted, time taken: " + str(time.time() - midtime))
    # NOTE(review): deletion via the shell; os.remove would be safer for
    # paths containing spaces or metacharacters.
    os.system('rm ' + OutDir + '/' + os.path.basename(Gene))
    print("Starting SNP coverage extraction")
    midtime = time.time()
    GetCov(OutDir + '/SNPs/' + os.path.basename(Gene) + '.csv', CovDir,
           OutDir + '/SNPCov/' + os.path.basename(Gene) + '.csv',
           OutDir + '/' + os.path.basename(Gene), extrapos, mincov)
    print("SNP coverage extracted, time taken " + str(time.time() - midtime))
    print("Total time taken: " + str(time.time() - strttime))
if __name__ == '__main__':
    import sys
    import os
    from numpy import array, nonzero
    import multiprocessing as mp

    sysarray = array(sys.argv)

    def _flag_value(flag):
        """Return the command-line argument following *flag*.

        Raises IndexError when the flag is absent or is the last argument.
        (Uses a plain int index; the original indexed the argv list with a
        1-element numpy array, which modern Python rejects.)
        """
        return sys.argv[int(nonzero(sysarray == flag)[0][0]) + 1]

    def _int_flag(flag, default, errmsg):
        """Parse an optional integer flag, falling back to *default*.

        The original left the error text as a bare string statement (a no-op)
        and the variable undefined on failure; here the message is printed
        and the default is used.
        """
        if len(sysarray[sysarray == flag]) > 0:
            try:
                return int(_flag_value(flag))
            except (IndexError, ValueError):
                print(errmsg)
        return default

    # Required arguments
    try:
        OutDir = _flag_value('-o')
        CovDir = _flag_value('-c')
        GenomeDir = _flag_value('-g')
    except IndexError:
        print("Please supply -o OutDir, -c CovDir and -g GenomeDir")
        sys.exit(1)
    try:
        RefGenome = _flag_value('-r')
    except IndexError:
        print("Please input complete reference genome fasta with: -r RefFasta")
        sys.exit(1)

    # Optional arguments with defaults
    window = _int_flag('-split', 1000000,
                       "Incorrect split size provided, please input -split ####")
    cpus = _int_flag('-cpus', 1,
                     "Incorrect cpus provided, please input -cpus ####")
    mincov = _int_flag('-minc', 1,
                       "Incorrect minimum coverage provided, please input -minc ####")
    extrapos = _int_flag('-xpos', 10,
                         "Incorrect extra positions provided, please input -xpos ####")

    ###########################################################################
    # Making output directories ###############################################
    ###########################################################################
    for subdir in ('SplitGenome', 'Assemblies', 'Alignments', 'SNPs', 'SNPCov'):
        try:
            os.makedirs(OutDir + '/' + subdir)
        except OSError:
            pass  # directory already exists

    # Split the reference genome into windows and pool them into one fasta.
    try:
        splitgenome(RefGenome, window, OutDir + '/SplitGenome/')
    except IOError:
        print('Incorrect Reference Sequence')
        print("Please input complete reference genome fasta with: -r RefFasta")
        sys.exit(1)
    os.system('cat ' + OutDir + '/SplitGenome/* >> ' + OutDir + '/AllGenes.fasta')

    # Running chngHeader on assemblies ensures they line up with the
    # assemblies used for coverage mapping.  If original assemblies are used
    # this produces identical assemblies; if coverage-mapped assemblies are
    # used it simply reproduces them.
    # NOTE(review): GenomeDir is concatenated without a '/' here, so it is
    # assumed to carry a trailing slash — as in the original.
    for entry in os.listdir(GenomeDir):
        SetFastaHeader(GenomeDir + entry, OutDir + '/Assemblies/')

    # Run SNPfinder on the split genes, `cpus` worker processes per batch.
    # Bug fix: the original always discarded exactly two genes per batch
    # (geneins[2:]) regardless of `cpus`, re-running or skipping genes and
    # raising IndexError when fewer than `cpus` genes remained.
    geneins = os.listdir(OutDir + '/SplitGenome')
    while len(geneins) > 0:
        batch = geneins[:cpus]
        processes = [mp.Process(target=SNPfinder,
                                args=(OutDir + '/SplitGenome/' + str(batch[x]),
                                      GenomeDir, RefGenome, OutDir,
                                      OutDir + '/tmpseqfile' + str(x),
                                      OutDir + '/tmpblastfile' + str(x),
                                      CovDir, extrapos, mincov))
                     for x in range(len(batch))]
        geneins = geneins[len(batch):]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
|
DJBAR/SNPfinder
|
SNPfinder.py
|
Python
|
gpl-2.0
| 23,524
|
[
"BLAST"
] |
64c0bffcebc786617ce26c283e2ab14168f235262663cda0009dc5ef5012e852
|
#!/usr/bin/env python
import os
import sys
import subprocess
from setuptools import setup, find_packages
# Record the current git version (if available) into seqmagick/data/ver so the
# installed package can report it.  The tmp-file + mv dance avoids leaving a
# half-written version file; failures (no git, not a git checkout) are
# silenced and leave any existing version file untouched.
subprocess.call(
    ('mkdir -p seqmagick/data && '
     'git describe --tags --dirty > seqmagick/data/ver.tmp '
     '&& mv seqmagick/data/ver.tmp seqmagick/data/ver '
     '|| rm -f seqmagick/data/ver.tmp'),
    shell=True, stderr=open(os.devnull, "w"))
# must import __version__ after call to 'git describe' above
from seqmagick import __version__
setup(name='seqmagick',
      version=__version__,
      description='Tools for converting and modifying sequence files '
                  'from the command-line',
      url='http://github.com/fhcrc/seqmagick',
      download_url='http://pypi.python.org/pypi/seqmagick',
      author='Matsen Group',
      # author_email='http://matsen.fhcrc.org/',
      packages=find_packages(),
      entry_points={
          'console_scripts': [
              'seqmagick = seqmagick.scripts.cli:main'
          ]},
      # Ship the generated version file plus the integration-test fixtures.
      package_data={
          'seqmagick': ['data/*'],
          'seqmagick.test.integration': ['data/*']
      },
      setup_requires=['nose>=1.0'],
      python_requires='>=3.5',
      test_suite='nose.collector',
      install_requires=['biopython>=1.78', 'pygtrie>=2.1'],
      classifiers=[
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Development Status :: 4 - Beta',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
      ],
      license="GPL V3")
|
fhcrc/seqmagick
|
setup.py
|
Python
|
gpl-3.0
| 1,667
|
[
"Biopython"
] |
d84de71120117ab97aef7a6562dbe9c55fc0a529bbca84db7430de2e8dc036bc
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
import os
import socket
import urllib2
from gluon import *
import sys
biopython_lib = request.folder + "modules/"
sys.path.append(biopython_lib)
from Bio import Phylo
from Bio.Phylo import * #import biopython modules as per requirement
def index():
    """List the demo sample-data folders for the landing page.

    Scans static/sample_data/ for entries named 'demo_<Name>' and returns
    (folder, label) pairs, the label being the folder name with underscores
    replaced by spaces — used for display names in the view's <select>.
    """
    sample_root = current.request.folder + 'static/sample_data/'
    demo_names = []
    for entry in os.listdir(sample_root):
        if entry.split('_')[0] == 'demo':
            demo_names.append(entry.split('_', 1)[1])
    display_labels = [name.replace('_', ' ') for name in demo_names]
    # e.g. [ ['Mammal_Hemoglobin', 'Mammal Hemoglobin'], ... ]
    return dict(foldersAndLabels=zip(demo_names, display_labels))
def getFileName():
    """Absolute path of the "input" gene-tree file.

    Built from the treeFileName request parameter, which is the main
    state-tracking value passed between view and controller at each step.
    """
    return current.request.folder + current.request.vars.treeFileName
def getRelativeWebPath(suffix):
    """Map a generated-file suffix to its web-relative path (currently the
    identity — kept as a hook for future path rewriting)."""
    return suffix
def visualize():
    """Preprocess before rendering the tree-viewing HTML.

    Combines the 'treeName' and 'file' request params into the absolute URL
    of the tree file under static/sample_data.  The full URL is needed only
    because the <applet>-based Archaeopteryx viewer takes the *entire* tree
    URL as a parameter.
    """
    tree_name = current.request.vars.treeName
    file_suffix = current.request.vars.file
    # Default to the request's http_host ...
    host = current.request.env.http_host
    # ... but switch to the public phylotastic URL when the local socket
    # hostname indicates a NESCENT deployment.
    if 'phylotastic' in socket.gethostname().lower():
        host = 'phylotastic.nescent.org'
    tree_url = URL('static',
                   'sample_data/demo_' + tree_name + '/input_genetree.nwk' + file_suffix,
                   scheme='http', host=host)
    return dict(treeUrl=tree_url, header=current.request.vars.header)
def getSpeciesList():
    """Run forester's gene-tree preprocessing and report kept/removed taxa.

    Invokes org.forester.application.gene_tree_preprocess on the input tree,
    then reads back the '*_removed_nodes.txt' and '*_species_present.txt'
    files it produces.  vizFile/vizLabel are returned from every Ajax call so
    the front-end can build visualization links consistently.
    """
    tree_path = getFileName()
    # Prefix for all generated outputs: the input path minus its '.txt'.
    prefix = tree_path[:-4]
    command = ' '.join(['java -Xmx1024m -cp',
                        current.request.folder + 'static/lib/forester.jar',
                        'org.forester.application.gene_tree_preprocess',
                        tree_path])
    os.system(command)
    removed = [l.strip() for l in open(prefix + '_removed_nodes.txt').readlines()]
    kept = [l.strip() for l in open(prefix + '_species_present.txt').readlines()]
    return response.json(dict(vizFile=getRelativeWebPath('_preprocessed_gene_tree.phylo.xml'),
                              vizLabel="Input Gene Tree",
                              removedNodes=removed,
                              keptNodes=kept))
def getPhylotasticTree():
    """Fetch a pruned species tree from the Phylotastic web service.

    Sends the kept species list to the phylotastic CGI, stores the returned
    newick tree next to the input file, and counts its leaves so the
    front-end can check whether every requested species came back.
    """
    tree_path = getFileName()
    prefix = tree_path[:-4]
    # Comma-delimited species names, underscores instead of spaces.
    species = [l.strip().replace(' ', '_')
               for l in open(prefix + '_species_present.txt').readlines()]
    query_url = ('http://phylotastic-wg.nescent.org/script/phylotastic.cgi?species='
                 + ','.join(species) + '&tree=mammals&format=newick')
    newick = urllib2.urlopen(query_url).read().strip()
    species_tree_path = prefix + '_species_tree.txt'
    open(species_tree_path, 'w').write(newick)
    # Count terminal nodes (leaves) of the returned newick tree; see
    # http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc182 for
    # count_terminals().
    parsed_tree = Phylo.read(species_tree_path, 'newick')
    leaf_count = BaseTree.TreeMixin.count_terminals(parsed_tree)
    return response.json(dict(vizFile=getRelativeWebPath('_species_tree.txt'),
                              vizLabel="Phylotastic Species Tree",
                              got_nodes=leaf_count))
def reconcileTrees():
    """Reconcile the preprocessed gene tree against the species tree.

    Runs forester's GSDI application on the two trees produced by the
    earlier steps and returns the web path of the reconciled output.
    """
    tree_path = getFileName()
    prefix = tree_path[:-4]
    gene_tree = prefix + '_preprocessed_gene_tree.phylo.xml'
    species_tree = prefix + '_species_tree.txt'
    os.system(' '.join(['java -Xmx1024m -cp',
                        current.request.folder + 'static/lib/forester.jar',
                        'org.forester.application.gsdi',
                        '-m -q',
                        gene_tree,
                        species_tree]))
    out_file = getRelativeWebPath('_preprocessed_gene_tree.phylo_gsdi_out.phylo.xml')
    return response.json(dict(vizFile=out_file,
                              vizLabel="Reconciled Tree (red=duplication, green=speciation)"))
|
prateekgupta3991/justforlearn
|
controllers/reconciliotastic.py
|
Python
|
mit
| 6,674
|
[
"Biopython"
] |
badcaa102681ac436d415eec39374775a17c0f6b700a5df63b3bd7855750dfaa
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, csv
import pandas
class CombineCSV(object):
    """
    Combine a time series of csv outputs, one file per time step, into a
    single csv file.  The whole operation runs in the constructor: step files
    named '<basename>0000.csv', '<basename>0001.csv', ... are read in
    lockstep, one output row per input row, one column per step.

    The class may be run from the command line. Command line help is available
    with '-h'.
    Args:
        basename[str]: Basename of csv time series.
        outfilename[str]: Output file path/name.
        y_varname[str]: "y" variable name (column harvested from each step).
    Kwargs (names as accepted by __init__; the old docstring listed
    'x_variable'/'last'/'start'/'end', which are not the real keyword names):
        delimiter[str]: Separating character(s) between values, usually ",".
        write_header[bool]: True writes header to output file.
        x_varname[str]: "x" variable name (leading column copied from inputs).
        lastn[int]: Take only the final "lastn" number of steps.
        startt[int]: Start at step number "startt".
        endt[int]: End at step number "endt".
        timefile[bool]: True takes time from "<basename>time.csv" file.
        bilinear[bool]: True formats output as bilinear (transposed) file.
    """
    def __init__(self, basename, outfilename, y_varname, **kwargs):
        """
        Set internal variables and perform the combine.
        """
        # Set object characteristics.
        # Fix: this was self.__ended here but self._ended at the end of
        # __init__; unified on self._ended.
        self._ended = False
        self.__basename = basename
        self.__outfilename = outfilename
        self.__y_varname = y_varname
        self.__delimiter = kwargs.pop('delimiter', ',')
        self.__write_header = kwargs.pop('write_header', False)
        self.__x_varname = kwargs.pop('x_varname', None)
        self.__lastn = kwargs.pop('lastn', None)
        self.__startt = kwargs.pop('startt', 0)
        self.__endt = kwargs.pop('endt', None)
        self.__timefile = kwargs.pop('timefile', False)
        self.__bilinear = kwargs.pop('bilinear', False)
        # Collect step files <basename>0000.csv, 0001, ... until a gap.
        csvfile_names = []
        csvfiles = []
        csvdictreaders = []
        open_names = []  # names of the files actually opened (honors startt/endt)
        times = []
        time_idx = 0
        while(True):
            file_name = self.__basename + "{0:04d}".format(time_idx) + '.csv'
            time_idx += 1
            if not os.path.isfile(file_name):
                break
            csvfile_names.append(file_name)
        if len(csvfile_names) == 0:
            raise CombineCSV.CombineError("BasenameError",
                    "Could not find any input files with basename: {0}".format(
                    self.__basename))
        # Determine start and stop
        if self.__lastn != None:
            if self.__startt != 0 or self.__endt != None:
                raise CombineCSV.CombineError("StepBoundsError",
                        "Cannot specify --last together with --start or --end")
            self.__startt = len(csvfile_names) - self.__lastn
        if self.__endt == None:
            self.__endt = len(csvfile_names) - 1
        if self.__timefile:
            df_time = pandas.read_csv(self.__basename+'time.csv')
        for i, file_name in enumerate(csvfile_names):
            if i >= self.__startt and i <= self.__endt:
                csvfiles.append(open(file_name))
                open_names.append(file_name)
                csvdictreaders.append(csv.DictReader(csvfiles[-1]))
                if self.__timefile:
                    # Swap timestep for time
                    times.append(str(df_time.iloc[i,0]))
                else:
                    times.append(str(i))
        # Output columns: optional x column first, then one column per step.
        fieldnames = []
        if self.__x_varname != None:
            fieldnames += [self.__x_varname]
        fieldnames += times
        outfile = open(self.__outfilename, 'w')
        csvwriter = csv.DictWriter(outfile, delimiter=self.__delimiter,
                lineterminator='\n', fieldnames=fieldnames)
        if self.__write_header:
            csvwriter.writeheader()
        # Read all step files in lockstep, one row at a time; stop as soon
        # as any file runs out of rows.
        keep_reading = True
        while (keep_reading):
            for icsv, csvdictreader in enumerate(csvdictreaders):
                try:
                    curr_line_data = csvdictreader.__next__()
                except StopIteration:
                    keep_reading = False
                    break
                if icsv == 0:
                    line_data = {}
                if self.__x_varname != None:
                    try:
                        cur_xvar = curr_line_data[self.__x_varname]
                    except KeyError as kerr:
                        for csvfile in csvfiles:
                            csvfile.close()
                        outfile.close()
                        # Fix: error messages now use open_names[icsv];
                        # csvfile_names[icsv] named the wrong file when
                        # startt > 0.
                        raise CombineCSV.CombineError("XVariableError",
                                "Cannot find '" + self.__x_varname +
                                "' field in file: " +
                                open_names[icsv]) from kerr
                    if icsv == 0:
                        line_data[self.__x_varname] = cur_xvar
                    else:
                        # The x column must agree across all step files.
                        if cur_xvar != line_data[self.__x_varname]:
                            for csvfile in csvfiles:
                                csvfile.close()
                            outfile.close()
                            raise CombineCSV.CombineError("InconsistentError",
                                    "Inconsistent value for '" +
                                    self.__x_varname + "' field in file: " +
                                    open_names[icsv] + "\ncur: " +
                                    cur_xvar + " orig: " +
                                    line_data[self.__x_varname])
                try:
                    cur_data = curr_line_data[self.__y_varname]
                except KeyError as kerr:
                    for csvfile in csvfiles:
                        csvfile.close()
                    outfile.close()
                    # Fix: previously reported csvfile_names[i] — a stale
                    # loop index naming the wrong file.
                    raise CombineCSV.CombineError("YVariableError",
                            "Cannot find '" + self.__y_varname +
                            "' field in file: " + open_names[icsv]) from kerr
                line_data[times[icsv]] = cur_data
            if keep_reading:
                csvwriter.writerow(line_data)
        for csvfile in csvfiles:
            csvfile.close()
        outfile.close()
        if self.__bilinear:
            # Rewrite the output transposed: rows become steps, columns x.
            df_csv = pandas.read_csv(self.__outfilename, index_col=0)
            df_tran = df_csv.T
            all_col = df_tran.columns.map(str)
            with open(self.__outfilename, 'w') as f:
                f.write(','.join(all_col.values.tolist()) + '\n')
                df_tran.to_csv(f, header=False)
        # Final
        self._final_df = pandas.read_csv(self.__outfilename)
        self._ended = True
    class CombineError(Exception):
        """
        Exception for all errors generated by combine_csv.
        Args:
            name[str]: A short identifier key of error.
            msg[str]: A detailed error message for output.
        """
        def __init__(self, name, msg):
            # Fix: pass a readable message to Exception so str(err) is
            # useful (previously the base class got no args, so str(err)
            # was empty).
            super().__init__("{0}: {1}".format(name, msg))
            self._name = name
            self._msg = msg
# This file is executable and allows for running combine_csv via command line.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="Combine series of csv outputs in separate files for each time step into a single file")
    parser.add_argument("basename", type=str, help="Basename of csv file time series")
    parser.add_argument("-o", "--output", type=str, help="output file", required=True)
    parser.add_argument("-d", "--delimiter", type=str, default=',', help="delimiter for output file")
    parser.add_argument("-w", "--write_header", action="store_true", help="write header in output file")
    parser.add_argument("-x", "--x_variable", type=str, help="x variable name")
    parser.add_argument("-y", "--y_variable", type=str, help="y variable name", required=True)
    parser.add_argument("-l", "--last", type=int, help="take last n steps")
    parser.add_argument("-s", "--start", type=int, help="start at step", default=0)
    parser.add_argument("-e", "--end", type=int, help="end at step")
    parser.add_argument("-t", "--timefile", action="store_true", help="time will be taken from '*time.csv' file")
    # typo fix in help text: "piecwise" -> "piecewise"
    parser.add_argument("-b", "--bilinear", action="store_true", help="create piecewise bilinear file, usually requires -t and -w")
    args = parser.parse_args()
    # Constructing CombineCSV performs the whole combine operation.
    run_program = CombineCSV(args.basename, args.output, args.y_variable,
                             delimiter=args.delimiter, write_header=args.write_header,
                             x_varname=args.x_variable, lastn=args.last, startt=args.start,
                             endt=args.end, timefile=args.timefile, bilinear=args.bilinear)
|
nuclear-wizard/moose
|
python/postprocessing/combine_csv.py
|
Python
|
lgpl-2.1
| 8,745
|
[
"MOOSE"
] |
a07f11474a5b6d8ef1b5197627b29ab576f38eafff657cfe799fbb60889fbba0
|
# sybase/base.py
# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG https://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect within SQLAlchemy **is not currently supported**.
It is not tested within continuous integration and is likely to have
many issues and caveats not currently handled. Consider using the
`external dialect <https://github.com/gordthompson/sqlalchemy-sybase>`_
instead.
.. deprecated:: 1.4 The internal Sybase dialect is deprecated and will be
removed in a future version. Use the external dialect.
"""
import re
from sqlalchemy import exc
from sqlalchemy import schema as sa_schema
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import text
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BINARY
from sqlalchemy.types import CHAR
from sqlalchemy.types import DATE
from sqlalchemy.types import DATETIME
from sqlalchemy.types import DECIMAL
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INT # noqa
from sqlalchemy.types import INTEGER
from sqlalchemy.types import NCHAR
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import NVARCHAR
from sqlalchemy.types import REAL
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.types import Unicode
from sqlalchemy.types import VARBINARY
from sqlalchemy.types import VARCHAR
# Words that must be quoted when used as identifiers in Sybase SQL.
RESERVED_WORDS = set(
    """
    add all alter and any as asc backup begin between
    bigint binary bit bottom break by call capability cascade case
    cast char char_convert character check checkpoint close comment commit connect
    constraint contains continue convert create cross cube current current_timestamp current_user
    cursor date dbspace deallocate dec decimal declare default delete deleting
    desc distinct do double drop dynamic else elseif encrypted end
    endif escape except exception exec execute existing exists externlogin fetch
    first float for force foreign forward from full goto grant
    group having holdlock identified if in index index_lparen inner inout
    insensitive insert inserting install instead int integer integrated intersect into
    iq is isolation join key lateral left like lock login
    long match membership message mode modify natural new no noholdlock
    not notify null numeric of off on open option options
    or order others out outer over passthrough precision prepare primary
    print privileges proc procedure publication raiserror readtext real reference references
    release remote remove rename reorganize resource restore restrict return revoke
    right rollback rollup save savepoint scroll select sensitive session set
    setuser share smallint some sqlcode sqlstate start stop subtrans subtransaction
    synchronize syntax_error table temporary then time timestamp tinyint to top
    tran trigger truncate tsequal unbounded union unique unknown unsigned update
    updating user using validate values varbinary varchar variable varying view
    wait waitfor when where while window with with_cube with_lparen with_rollup
    within work writetext
    """.split()
)
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    """Sybase UNICHAR (fixed-length Unicode) type."""
    __visit_name__ = "UNICHAR"
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    """Sybase UNIVARCHAR (variable-length Unicode) type."""
    __visit_name__ = "UNIVARCHAR"
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    """Sybase UNITEXT (Unicode large text) type."""
    __visit_name__ = "UNITEXT"
class TINYINT(sqltypes.Integer):
    """Sybase TINYINT type."""
    __visit_name__ = "TINYINT"
class BIT(sqltypes.TypeEngine):
    """Sybase BIT type (used as the boolean stand-in by this dialect)."""
    __visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
    """Sybase MONEY type."""
    __visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
    """Sybase SMALLMONEY type."""
    __visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """Sybase UNIQUEIDENTIFIER type."""
    __visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
    """Sybase IMAGE type (large binary)."""
    __visit_name__ = "IMAGE"
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    """Renders Sybase type names in DDL, mapping the generic SQLAlchemy
    types (LargeBinary, Boolean, Unicode) onto their Sybase equivalents."""
    def visit_large_binary(self, type_, **kw):
        return self.visit_IMAGE(type_)
    def visit_boolean(self, type_, **kw):
        # booleans are rendered as BIT in this dialect
        return self.visit_BIT(type_)
    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_)
    def visit_UNICHAR(self, type_, **kw):
        return "UNICHAR(%d)" % type_.length
    def visit_UNIVARCHAR(self, type_, **kw):
        return "UNIVARCHAR(%d)" % type_.length
    def visit_UNITEXT(self, type_, **kw):
        return "UNITEXT"
    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"
    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"
    def visit_BIT(self, type_, **kw):
        return "BIT"
    def visit_MONEY(self, type_, **kw):
        return "MONEY"
    def visit_SMALLMONEY(self, type_, **kw):
        return "SMALLMONEY"
    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"
# Maps type names as reported by the Sybase system catalogs to the
# SQLAlchemy type classes used during table reflection.
ischema_names = {
    "bigint": BIGINT,
    "int": INTEGER,
    "integer": INTEGER,
    "smallint": SMALLINT,
    "tinyint": TINYINT,
    "unsigned bigint": BIGINT,  # TODO: unsigned flags
    "unsigned int": INTEGER,  # TODO: unsigned flags
    "unsigned smallint": SMALLINT,  # TODO: unsigned flags
    "numeric": NUMERIC,
    "decimal": DECIMAL,
    "dec": DECIMAL,
    "float": FLOAT,
    "double": NUMERIC,  # TODO
    "double precision": NUMERIC,  # TODO
    "real": REAL,
    "smallmoney": SMALLMONEY,
    "money": MONEY,
    "smalldatetime": DATETIME,
    "datetime": DATETIME,
    "date": DATE,
    "time": TIME,
    "char": CHAR,
    "character": CHAR,
    "varchar": VARCHAR,
    "character varying": VARCHAR,
    "char varying": VARCHAR,
    "unichar": UNICHAR,
    "unicode character": UNIVARCHAR,
    "nchar": NCHAR,
    "national char": NCHAR,
    "national character": NCHAR,
    "nvarchar": NVARCHAR,
    "nchar varying": NVARCHAR,
    "national char varying": NVARCHAR,
    "national character varying": NVARCHAR,
    "text": TEXT,
    "unitext": UNITEXT,
    "binary": BINARY,
    "varbinary": VARBINARY,
    "image": IMAGE,
    "bit": BIT,
    # not in documentation for ASE 15.7
    "long varchar": TEXT,  # TODO
    "timestamp": TIMESTAMP,
    "uniqueidentifier": UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
    """Inspector adding a Sybase-specific helper for table-id lookup."""
    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)
    def get_table_id(self, table_name, schema=None):
        """Return the table id from `table_name` and `schema`."""
        return self.dialect.get_table_id(
            self.bind, table_name, schema, info_cache=self.info_cache
        )
class SybaseExecutionContext(default.DefaultExecutionContext):
    """Execution context handling IDENTITY_INSERT toggling around INSERTs
    and the autocommit requirement this dialect imposes on DDL."""
    # True while the current INSERT supplies an explicit value for the
    # table's IDENTITY column; drives SET IDENTITY_INSERT ON/OFF below.
    _enable_identity_insert = False
    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.
        "connection" is the raw unwrapped DBAPI connection. "value"
        is True or False. when True, the connection should be configured
        such that a DDL can take place subsequently. when False,
        a DDL has taken place and the connection should be resumed
        into non-autocommit mode.
        """
        raise NotImplementedError()
    def pre_exec(self):
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None
            if insert_has_sequence:
                # explicit IDENTITY value present in the bound parameters?
                self._enable_identity_insert = (
                    seq_column.key in self.compiled_parameters[0]
                )
            else:
                self._enable_identity_insert = False
            if self._enable_identity_insert:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s ON"
                    % self.dialect.identifier_preparer.format_table(tbl)
                )
        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings. this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time."
                )
            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')"
            )
            self.set_ddl_autocommit(
                self.root_connection.connection.connection, True
            )
    def post_exec(self):
        # undo the autocommit / IDENTITY_INSERT state set in pre_exec()
        if self.isddl:
            self.set_ddl_autocommit(self.root_connection, False)
        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF"
                % self.dialect.identifier_preparer.format_table(
                    self.compiled.statement.table
                )
            )
    def get_lastrowid(self):
        # @@identity holds the last identity value generated on this
        # connection
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
    """Statement compiler emitting Sybase-specific SQL: ROWS LIMIT/OFFSET,
    DATEPART-based EXTRACT, GETDATE(), and DELETE..FROM handling."""
    ansi_bind_rules = True
    # map SQLAlchemy extract() field names onto Sybase DATEPART names
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {"doy": "dayofyear", "dow": "weekday", "milliseconds": "millisecond"},
    )
    def get_from_hint_text(self, table, text):
        return text
    def limit_clause(self, select, **kw):
        text = ""
        if select._limit_clause is not None:
            text += " ROWS LIMIT " + self.process(select._limit_clause, **kw)
        if select._offset_clause is not None:
            if select._limit_clause is None:
                # emit the ROWS keyword itself when no LIMIT preceded it
                text += " ROWS"
            text += " OFFSET " + self.process(select._offset_clause, **kw)
        return text
    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))
    def visit_now_func(self, fn, **kw):
        return "GETDATE()"
    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ""
    def order_by_clause(self, select, **kw):
        # render bind parameter values inline in the ORDER BY expression
        kw["literal_binds"] = True
        order_by = self.process(select._order_by_clause, **kw)
        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
    def delete_table_clause(self, delete_stmt, from_table, extra_froms):
        """If we have extra froms make sure we render any alias as hint."""
        ashint = False
        if extra_froms:
            ashint = True
        return from_table._compiler_dispatch(
            self, asfrom=True, iscrud=True, ashint=ashint
        )
    def delete_extra_from_clause(
        self, delete_stmt, from_table, extra_froms, from_hints, **kw
    ):
        """Render the DELETE .. FROM clause specific to Sybase."""
        kw["asfrom"] = True
        return "FROM " + ", ".join(
            t._compiler_dispatch(self, fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms
        )
class SybaseDDLCompiler(compiler.DDLCompiler):
    """DDL compiler handling Sybase IDENTITY columns, explicit NULL/NOT NULL
    rendering and table-qualified DROP INDEX."""
    def get_column_specification(self, column, **kwargs):
        # name + type
        colspec = (
            self.preparer.format_column(column)
            + " "
            + self.dialect.type_compiler.process(
                column.type, type_expression=column
            )
        )
        if column.table is None:
            raise exc.CompileError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL"
            )
        seq_col = column.table._autoincrement_column
        # install a IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            sequence = (
                isinstance(column.default, sa_schema.Sequence)
                and column.default
            )
            if sequence:
                start, increment = sequence.start or 1, sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
            # render an explicit NULL / NOT NULL whenever nullability is
            # specified on the column
            if column.nullable is not None:
                if not column.nullable or column.primary_key:
                    colspec += " NOT NULL"
                else:
                    colspec += " NULL"
        return colspec
    def visit_drop_index(self, drop):
        index = drop.element
        # DROP INDEX is qualified with the table name; the schema is
        # deliberately excluded (include_schema=False)
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self._prepared_index_name(drop.element, include_schema=False),
        )
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer using the Sybase reserved-word list for quoting."""
    reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Sybase ASE.

    Deprecated; superseded by the external
    https://github.com/gordthompson/sqlalchemy-sybase dialect.

    Reflection queries go directly against the ASE system tables
    (sysobjects, syscolumns, sysreferences, sysindexes, sysusers).
    """

    name = "sybase"
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    supports_statement_cache = True
    supports_native_boolean = False
    supports_unicode_binds = False
    postfetch_lastrowid = True
    colspecs = {}
    ischema_names = ischema_names
    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer
    inspector = SybaseInspector
    construct_arguments = []

    def __init__(self, *args, **kwargs):
        util.warn_deprecated(
            "The Sybase dialect is deprecated and will be removed "
            "in a future version. This dialect is superseded by the external "
            "dialect https://github.com/gordthompson/sqlalchemy-sybase.",
            version="1.4",
        )
        super(SybaseDialect, self).__init__(*args, **kwargs)

    def _get_default_schema_name(self, connection):
        # user_name() returns the current database user name on ASE.
        return connection.scalar(
            text("SELECT user_name() as user_name").columns(username=Unicode)
        )

    def initialize(self, connection):
        super(SybaseDialect, self).initialize(connection)
        # ASE versions prior to 15 limit identifiers to 30 characters;
        # 15 and later allow 255.
        if (
            self.server_version_info is not None
            and self.server_version_info < (15,)
        ):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255

    def get_table_id(self, connection, table_name, schema=None, **kw):
        """Fetch the id for schema.table_name.

        Several reflection methods require the table id. The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        :raises exc.NoSuchTableError: if no table or view matches.
        """
        table_id = None
        if schema is None:
            schema = self.default_schema_name
        TABLEID_SQL = text(
            """
          SELECT o.id AS id
          FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
          WHERE u.name = :schema_name
              AND o.name = :table_name
              AND o.type in ('U', 'V')
        """
        )
        if util.py2k:
            # bind parameters must be plain (ascii) strings on py2k
            if isinstance(schema, unicode):  # noqa
                schema = schema.encode("ascii")
            if isinstance(table_name, unicode):  # noqa
                table_name = table_name.encode("ascii")
        result = connection.execute(
            TABLEID_SQL, schema_name=schema, table_name=table_name
        )
        table_id = result.scalar()
        if table_id is None:
            raise exc.NoSuchTableError(table_name)
        return table_id

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column definitions from syscolumns/systypes."""
        table_id = self.get_table_id(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )
        COLUMN_SQL = text(
            """
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """
        )
        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = []
        for (
            name,
            type_,
            nullable,
            autoincrement,
            default_,
            precision,
            scale,
            length,
        ) in results:
            col_info = self._get_column_info(
                name,
                type_,
                bool(nullable),
                bool(autoincrement),
                default_,
                precision,
                scale,
                length,
            )
            columns.append(col_info)
        return columns

    def _get_column_info(
        self,
        name,
        type_,
        nullable,
        autoincrement,
        default,
        precision,
        scale,
        length,
    ):
        """Translate one syscolumns row into a SQLAlchemy column dict."""
        coltype = self.ischema_names.get(type_, None)
        kwargs = {}
        # Pass only the type arguments that apply to this family of types.
        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()
        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn(
                "Did not recognize type '%s' of column '%s'" % (type_, name)
            )
            coltype = sqltypes.NULLTYPE
        if default:
            # syscomments stores the literal text "DEFAULT <value>";
            # strip the keyword and any surrounding quotes.
            default = default.replace("DEFAULT", "").strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None
        column_info = dict(
            name=name,
            type=coltype,
            nullable=nullable,
            default=default,
            autoincrement=autoincrement,
        )
        return column_info

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect foreign keys from sysreferences.

        sysreferences stores up to 16 constrained/referenced column ids
        per constraint as fokey1..fokey16 / refkey1..refkey16.
        """
        table_id = self.get_table_id(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )
        table_cache = {}
        column_cache = {}
        foreign_keys = []
        table_cache[table_id] = {"name": table_name, "schema": schema}
        COLUMN_SQL = text(
            """
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """
        )
        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns
        # BUGFIX: columns 8 were previously aliased from column 1
        # (``r.fokey1 AS fokey8`` / ``r.refkey1 AS refkey8``), which made
        # any FK with 8 or more columns reflect the wrong column name.
        REFCONSTRAINT_SQL = text(
            """
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """
        )
        referential_constraints = connection.execute(
            REFCONSTRAINT_SQL, table_id=table_id
        ).fetchall()
        REFTABLE_SQL = text(
            """
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """
        )
        for r in referential_constraints:
            reftable_id = r["reftable_id"]
            # Resolve and cache the referenced table's name/schema and
            # its colid -> name mapping on first sight.
            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (
                    schema is not None
                    or reftable["schema"] != self.default_schema_name
                ):
                    table_info["schema"] = reftable["schema"]
                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns
            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]
            constrained_columns = []
            referred_columns = []
            # keycnt gives how many of the 16 fokey/refkey slots are used
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])
            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"],
            }
            foreign_keys.append(fk_info)
        return foreign_keys

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Reflect non-PK indexes from sysindexes via index_col()."""
        table_id = self.get_table_id(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )
        # status & 2048 marks primary-key indexes, which are excluded here
        # and returned by get_pk_constraint() instead.
        INDEX_SQL = text(
            """
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 (i.status & 0x2) AS 'unique',
                 index_col(object_name(i.id), i.indid, 1) AS col_1,
                 index_col(object_name(i.id), i.indid, 2) AS col_2,
                 index_col(object_name(i.id), i.indid, 3) AS col_3,
                 index_col(object_name(i.id), i.indid, 4) AS col_4,
                 index_col(object_name(i.id), i.indid, 5) AS col_5,
                 index_col(object_name(i.id), i.indid, 6) AS col_6,
                 index_col(object_name(i.id), i.indid, 7) AS col_7,
                 index_col(object_name(i.id), i.indid, 8) AS col_8,
                 index_col(object_name(i.id), i.indid, 9) AS col_9,
                 index_col(object_name(i.id), i.indid, 10) AS col_10,
                 index_col(object_name(i.id), i.indid, 11) AS col_11,
                 index_col(object_name(i.id), i.indid, 12) AS col_12,
                 index_col(object_name(i.id), i.indid, 13) AS col_13,
                 index_col(object_name(i.id), i.indid, 14) AS col_14,
                 index_col(object_name(i.id), i.indid, 15) AS col_15,
                 index_col(object_name(i.id), i.indid, 16) AS col_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 0
            AND i.indid BETWEEN 1 AND 254
        """
        )
        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            column_names = []
            # NOTE(review): range(1, count) deliberately stops one short;
            # sysindexes.keycnt appears to include one extra key for most
            # index types -- confirm against the ASE system-table docs
            # before changing.
            for i in range(1, r["count"]):
                column_names.append(r["col_%i" % (i,)])
            index_info = {
                "name": r["name"],
                "unique": bool(r["unique"]),
                "column_names": column_names,
            }
            indexes.append(index_info)
        return indexes

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary-key constraint (sysindexes rows with
        status bit 2048 set)."""
        table_id = self.get_table_id(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )
        PK_SQL = text(
            """
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """
        )
        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()
        constrained_columns = []
        if pks:
            for i in range(1, pks["count"] + 1):
                constrained_columns.append(pks["pk_%i" % (i,)])
            return {
                "constrained_columns": constrained_columns,
                "name": pks["name"],
            }
        else:
            # no PK defined on this table
            return {"constrained_columns": [], "name": None}

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema (user) names from sysusers."""
        SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
        schemas = connection.execute(SCHEMA_SQL)
        return [s["name"] for s in schemas]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return user table names ('U' objects) in the given schema."""
        if schema is None:
            schema = self.default_schema_name
        TABLE_SQL = text(
            """
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'U'
        """
        )
        if util.py2k:
            if isinstance(schema, unicode):  # noqa
                schema = schema.encode("ascii")
        tables = connection.execute(TABLE_SQL, schema_name=schema)
        return [t["name"] for t in tables]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the CREATE VIEW source text from syscomments."""
        if schema is None:
            schema = self.default_schema_name
        VIEW_DEF_SQL = text(
            """
          SELECT c.text
          FROM syscomments c JOIN sysobjects o ON c.id = o.id
          WHERE o.name = :view_name
            AND o.type = 'V'
        """
        )
        if util.py2k:
            if isinstance(view_name, unicode):  # noqa
                view_name = view_name.encode("ascii")
        view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
        return view.scalar()

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names ('V' objects) in the given schema."""
        if schema is None:
            schema = self.default_schema_name
        VIEW_SQL = text(
            """
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'V'
        """
        )
        if util.py2k:
            if isinstance(schema, unicode):  # noqa
                schema = schema.encode("ascii")
        views = connection.execute(VIEW_SQL, schema_name=schema)
        return [v["name"] for v in views]

    def has_table(self, connection, table_name, schema=None):
        """Return True if the table or view exists (via get_table_id)."""
        self._ensure_has_table_connection(connection)
        try:
            self.get_table_id(connection, table_name, schema)
        except exc.NoSuchTableError:
            return False
        else:
            return True
|
monetate/sqlalchemy
|
lib/sqlalchemy/dialects/sybase/base.py
|
Python
|
mit
| 32,421
|
[
"ASE"
] |
79307c618266e6237ee767b75fcf7349f5c693f6fa4d58dbac13122608eb6305
|
#
# Evy - a concurrent networking library for Python
#
# Unless otherwise noted, the files in Evy are under the following MIT license:
#
# Copyright (c) 2012, Alvaro Saurin
# Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
# Copyright (c) 2007-2010, Linden Research, Inc.
# Copyright (c) 2005-2006, Bob Ippolito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from evy import hubs
from evy.support import greenlets as greenlet
from evy.timeout import Timeout, with_timeout
__all__ = ['Event']
class NOT_USED:
    """Sentinel type marking an Event result slot as "no value yet"."""
    def __repr__ (self):
        return 'NOT_USED'
# Replace the class with a singleton instance; the module compares with
# identity checks (``x is NOT_USED``) throughout.
NOT_USED = NOT_USED()
class Event(object):
    """
    An abstraction where an arbitrary number of coroutines can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and are the basis for how
    :meth:`GreenThread.wait() <evy.greenthread.GreenThread.wait>` is implemented.

    >>> from evy import event
    >>> import evy
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    # _result holds the sent value, or the NOT_USED sentinel before send();
    # _exc holds the exception tuple (if any); _waiters is the set of
    # greenlets currently blocked in wait().
    __slots__ = ['_result', '_exc', '_waiters']

    def __init__ (self):
        self._result = None
        self._exc = None
        self._waiters = set()
        # reset() flips _result to the NOT_USED sentinel; the assert in
        # reset() passes here because _result is still None.
        self.reset()

    def __str__ (self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset (self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready (self):
        """
        Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one.
        """
        return self._result is not NOT_USED

    def has_exception (self):
        # True once send_exception()/send(exc=...) has been called.
        return self._exc is not None

    def has_result (self):
        # True once a plain (non-exception) result has been sent.
        return self._result is not NOT_USED and self._exc is None

    def poll (self, notready = None):
        # Non-blocking wait(): returns `notready` if nothing was sent yet.
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to calls the function in except or finally)
    def poll_exception (self, notready = None):
        # Like poll(), but only yields (raises) when an exception was sent.
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result (self, notready = None):
        # Like poll(), but only returns when a plain result was sent.
        if self.has_result():
            return self.wait()
        return notready

    def wait (self, timeout = None, exception = None):
        """
        Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to
        :meth:`send`.

        >>> from evy import event
        >>> import evy
        >>> evt = event.Event()
        >>> def wait_on():
        ...    retval = evt.wait()
        ...    print "waited for", retval
        >>> _ = evy.spawn(wait_on)
        >>> evt.send('result')
        >>> sleep(0)
        waited for result

        Returns immediately if the event has already
        occured.

        >>> evt.wait()
        'result'
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            # Not sent yet: register as a waiter and yield to the hub
            # until _do_send switches back to us (or the Timeout fires).
            with Timeout(timeout, exception):
                self._waiters.add(current)
                try:
                    return hubs.get_hub().switch()
                finally:
                    self._waiters.discard(current)
        if self._exc is not None:
            # An exception was sent: re-raise it in the waiter's context.
            current.throw(*self._exc)
        return self._result

    def send (self, result = None, exc = None):
        """
        Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from evy import event
        >>> import evy
        >>> evt = event.Event()
        >>> def waiter():
        ...     print 'about to wait'
        ...     result = evt.wait()
        ...     print 'waited for', result
        >>> _ = evy.spawn(waiter)
        >>> sleep(0)
        about to wait
        >>> evt.send('a')
        >>> sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops')
        Traceback (most recent call last):
        ...
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        # Normalize a bare exception into a 1-tuple so it can be re-raised
        # with greenlet.throw(*exc).
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        # Wake each waiter via a hub callback rather than switching
        # directly, so send() returns to the caller immediately.
        for waiter in self._waiters:
            hub.run_callback(self._do_send, self._result, self._exc, waiter)

    def _do_send (self, result, exc, waiter):
        # Runs in the hub; skip waiters that timed out and deregistered
        # between send() and this callback.
        if waiter in self._waiters:
            if exc is None:
                waiter.switch(result)
            else:
                waiter.throw(*exc)

    def send_exception (self, *args):
        """
        Same as :meth:`send`, but sends an exception to waiters.

        The arguments to send_exception are the same as the arguments
        to ``raise``. If a single exception object is passed in, it
        will be re-raised when :meth:`wait` is called, generating a
        new stacktrace.

           >>> from evy import event
           >>> evt = event.Event()
           >>> evt.send_exception(RuntimeError())
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "evy/event.py", line 120, in wait
               current.throw(*self._exc)
           RuntimeError

        If it's important to preserve the entire original stack trace,
        you must pass in the entire :func:`sys.exc_info` tuple.

           >>> import sys
           >>> evt = event.Event()
           >>> try:
           ...     raise RuntimeError()
           ... except RuntimeError:
           ...     evt.send_exception(*sys.exc_info())
           ...
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "evy/event.py", line 120, in wait
               current.throw(*self._exc)
             File "<stdin>", line 2, in <module>
           RuntimeError

        Note that doing so stores a traceback object directly on the
        Event object, which may cause reference cycles. See the
        :func:`sys.exc_info` documentation.
        """
        # the arguments and the same as for greenlet.throw
        return self.send(None, args)
class metaphore(object):
    """
    An inverse semaphore: a counter starting at 0 on which :meth:`wait`
    blocks only while the count is nonzero. Useful for "wait until all N
    workers have finished" scenarios: :meth:`inc` the count as work is
    handed out, each worker calls :meth:`dec` when done, and a waiter is
    released once the count returns to zero.
    """

    def __init__ (self):
        self.counter = 0
        self.event = Event()
        # Fire the event immediately so that wait() on a brand-new
        # (zero-count) metaphore does not block.
        self.event.send()

    def inc (self, by = 1):
        """
        Increment our counter. If this transitions the counter from zero to
        nonzero, make any subsequent :meth:`wait` call wait.
        """
        assert by > 0
        was_zero = self.counter == 0
        self.counter += by
        # Leaving zero means wait() must start blocking again.
        if was_zero:
            self.event.reset()

    def dec (self, by = 1):
        """
        Decrement our counter. If this transitions the counter from nonzero
        to zero, a current or subsequent wait() call need no longer wait.
        """
        assert by > 0
        remaining = self.counter - by
        if remaining <= 0:
            # Clamp at zero so over-decrementing can't corrupt later calls,
            # and release any waiters.
            self.counter = 0
            self.event.send()
        else:
            self.counter = remaining

    def wait (self):
        """
        Suspend the caller only if our count is nonzero. In that case,
        resume the caller once the count decrements to zero again.
        """
        self.event.wait()
|
inercia/evy
|
evy/event.py
|
Python
|
mit
| 10,576
|
[
"VisIt"
] |
b79327337a393e315421a6c5e2bf076d726a409c25cab7fd833ce8e598c2fe61
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Options for authenticating with the API.
class ModuleDocFragment(object):
    """Shared Ansible doc fragment: K8s/OpenShift API authentication options."""

    # Parsed by Ansible's documentation tooling; the YAML text below is
    # runtime data and must remain exactly as written.
    DOCUMENTATION = '''
options:
  host:
    description:
    - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
  api_key:
    description:
    - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
  kubeconfig:
    description:
    - Path to an existing Kubernetes config file. If not provided, and no other connection
      options are provided, the openshift client will attempt to load the default
      configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
      variable.
  context:
    description:
    - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment variable.
  username:
    description:
    - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
      variable.
  password:
    description:
    - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
      variable.
  cert_file:
    description:
    - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE environment
      variable.
  key_file:
    description:
    - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_HOST environment
      variable.
  ssl_ca_cert:
    description:
    - Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
      environment variable.
  verify_ssl:
    description:
    - "Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
      environment variable."
    type: bool

notes:
  - "The OpenShift Python client wraps the K8s Python client, providing full access to
    all of the APIS and models available on both platforms. For API version details and
    additional information visit https://github.com/openshift/openshift-restclient-python"
'''
|
skg-net/ansible
|
lib/ansible/utils/module_docs_fragments/k8s_auth_options.py
|
Python
|
gpl-3.0
| 2,834
|
[
"VisIt"
] |
fc4aa8f2563a8315d38c9b360c9d9e687bc34b02c79172189f00bdf717f54430
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RA4(RPackage):
    """Automated Affymetrix Array Analysis Umbrella Package."""

    homepage = "https://www.bioconductor.org/packages/a4/"
    url = "https://git.bioconductor.org/packages/a4"
    list_url = homepage

    # Bioconductor releases are pinned to a git commit rather than a tarball.
    version('1.24.0', git='https://git.bioconductor.org/packages/a4', commit='79b5143652176787c85a0d587b3bbfad6b4a19f4')

    # This Bioconductor release only supports the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.24.0')
    # Sub-packages of the a4 umbrella, needed at build and run time.
    depends_on('r-a4base', type=('build', 'run'))
    depends_on('r-a4preproc', type=('build', 'run'))
    depends_on('r-a4classif', type=('build', 'run'))
    depends_on('r-a4core', type=('build', 'run'))
    depends_on('r-a4reporting', type=('build', 'run'))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-a4/package.py
|
Python
|
lgpl-2.1
| 1,915
|
[
"Bioconductor"
] |
e85db38c4a93367b1a2e1102a78c8ae1128ef451de27eabe6a52d04b9ba262e5
|
# Copyright (C) 2014-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module provides support for file download and upload. It calculates the
location of the input and output directories. It also has a utility for parsing
the job input file ('job_input.json').
We use the following shorthands
<idir> == input directory $HOME/in
<odir> == output directory $HOME/out
A simple example of the job input, when run locally, is:
{
"seq2": {
"$dnanexus_link": {
"project": "project-1111",
"id": "file-1111"
}
},
"seq1": {
"$dnanexus_link": {
"project": "project-2222",
"id": "file-2222"
}
}
"blast_args": "",
"evalue": 0.01
}
The first two elements are files {seq1, seq2}, the other elements are
{blast_args, evalue}. The files for seq1,seq2 should be saved into:
<idir>/seq1/<filename>
<idir>/seq2/<filename>
An example for a shell command that would create these arguments is:
$ dx run coolapp -iseq1=NC_000868.fasta -iseq2=NC_001422.fasta
It would run an app named "coolapp", with file arguments for seq1 and seq2. Both NC_*
files should be the names of files in a DNAnexus project (and should be resolved to their
file IDs by dx). Subsequently, after dx-download-all-inputs is run,
file seq1 should appear in the execution environment at path:
<idir>/seq1/NC_000868.fasta
File Arrays
{
"reads": [{
"$dnanexus_link": {
"project": "project-3333",
"id": "file-3333"
}
},
{
"$dnanexus_link": {
"project": "project-4444",
"id": "file-4444"
}
}]
}
This is a file array with two files. Running a command like this:
$ dx run coolapp -ireads=A.fastq -ireads=B.fasta
will download into the execution environment:
<idir>/reads/A.fastq
B.fastq
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import json
import pipes
import os
import fnmatch
import sys
import collections
import errno
import dxpy
from ..compat import environ, open, basestring
from ..exceptions import DXError
def get_input_dir(job_homedir=None):
    '''
    :param job_homedir: explicit value for home directory, used for testing purposes
    :rtype: string
    :returns: path to input directory

    Returns the input directory <HOME>/in, where all inputs are downloaded.
    '''
    # Fall back to $HOME unless an explicit home directory was supplied.
    base_dir = job_homedir if job_homedir is not None else environ.get('HOME')
    return os.path.join(base_dir, 'in')
def get_output_dir(job_homedir=None):
    '''
    :param job_homedir: explicit value for home directory, used for testing purposes
    :rtype: string
    :returns: path to output directory

    Returns the output directory <HOME>/out, where all outputs are created
    and uploaded from.
    '''
    # Fall back to $HOME unless an explicit home directory was supplied.
    base_dir = job_homedir if job_homedir is not None else environ.get('HOME')
    return os.path.join(base_dir, 'out')
def get_input_json_file():
    """
    :rtype: string
    :returns: path to the job input JSON file, $HOME/job_input.json
    """
    return os.path.join(environ.get('HOME'), "job_input.json")
def get_output_json_file():
    """
    :rtype: string
    :returns: path to the job output JSON file, $HOME/job_output.json
    """
    return os.path.join(environ.get('HOME'), "job_output.json")
def rm_output_json_file():
    """ Warning: this is not for casual use.
    It erases the output json file, and should be used for testing purposes only.

    A missing file is not an error; any other OS failure propagates.
    """
    try:
        os.remove(get_output_json_file())
    except OSError as err:
        # ENOENT means the file was already absent -- nothing to do.
        if err.errno != errno.ENOENT:
            raise
def ensure_dir(path):
"""
:param path: path to directory to be created
Create a directory if it does not already exist.
"""
if not os.path.exists(path):
# path does not exist, create the directory
os.mkdir(path)
else:
# The path exists, check that it is not a file
if os.path.isfile(path):
raise Exception("Path %s already exists, and it is a file, not a directory" % path)
def make_unix_filename(fname):
    """
    :param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
    :returns: a valid unix filename
    :rtype: string
    :raises: DXError if the filename is invalid on a Unix system

    *fname* may contain characters that are illegal in a Unix file name.
    Slashes are percent-encoded as ``%2F``; the two names that are
    meaningless as files on Unix ("." and "..") are rejected outright.
    """
    # reject names that cannot be regular files on Unix
    if fname in (".", ".."):
        raise DXError("Invalid filename {}".format(fname))
    return fname.replace('/', '%2F')
## filter from a dictionary a list of matching keys
def filter_dict(dict_, excl_keys):
    """Return a copy of *dict_* without any entries whose key is in *excl_keys*."""
    excluded = set(excl_keys)
    return dict((k, v) for k, v in dict_.items() if k not in excluded)
def get_job_input_filenames(job_input_file):
    """Extract list of files, returns a set of directories to create, and
    a set of files, with sources and destinations. The paths created are
    relative to the input directory.

    Note: we go through file names inside arrays, and create a
    separate subdirectory for each. This avoids clobbering files when
    duplicate filenames appear in an array.

    :param job_input_file: path to the job_input.json file
    :returns: (dirs, files, rest_hash) where ``dirs`` is a list of
        directories to create under <idir>, ``files`` maps each file-typed
        input name to a list of {trg_fname, handler, src_file_id} records,
        and ``rest_hash`` contains all non-file inputs.
    """
    def get_input_hash():
        # parse the job input JSON into a plain dict
        with open(job_input_file) as fh:
            job_input = json.load(fh)
        return job_input

    job_input = get_input_hash()
    files = collections.defaultdict(list)  # dictionary, with empty lists as default elements
    dirs = []  # directories to create under <idir>

    # Local function for adding a file to the list of files to be created
    # for example:
    #    iname == "seq1"
    #    subdir == "015"
    #    value == { "$dnanexus_link": {
    #       "project": "project-BKJfY1j0b06Z4y8PX8bQ094f",
    #       "id": "file-BKQGkgQ0b06xG5560GGQ001B"
    #    }
    # will create a record describing that the file should
    # be downloaded into seq1/015/<filename>
    def add_file(iname, subdir, value):
        # silently skip values that are not dnanexus file links
        if not dxpy.is_dxlink(value):
            return
        handler = dxpy.get_handler(value)
        if not isinstance(handler, dxpy.DXFile):
            return
        filename = make_unix_filename(handler.name)
        trg_dir = iname
        if subdir is not None:
            trg_dir = os.path.join(trg_dir, subdir)
        files[iname].append({'trg_fname': os.path.join(trg_dir, filename),
                             'handler': handler,
                             'src_file_id': handler.id})
        dirs.append(trg_dir)

    # An array of inputs, for a single key. A directory
    # will be created per array entry. For example, if the input key is
    # FOO, and the inputs are {A, B, C}.vcf then, the directory structure
    # will be:
    #   <idir>/FOO/00/A.vcf
    #               01/B.vcf
    #               02/C.vcf
    def add_file_array(input_name, links):
        num_files = len(links)
        if num_files == 0:
            return
        # zero-pad subdir names so they sort lexicographically (00, 01, ...)
        num_digits = len(str(num_files - 1))
        dirs.append(input_name)
        for i, link in enumerate(links):
            subdir = str(i).zfill(num_digits)
            add_file(input_name, subdir, link)

    for input_name, value in list(job_input.items()):
        if isinstance(value, list):
            # This is a file array
            add_file_array(input_name, value)
        else:
            add_file(input_name, None, value)

    ## create a dictionary of the all non-file elements
    rest_hash = {key: val for key, val in list(job_input.items()) if key not in files}
    return dirs, files, rest_hash
def get_input_spec_patterns():
    ''' Extract the inputSpec patterns, if they exist -- modified from dx-upload-all-outputs

    Returns a dict of all patterns, with keys equal to the respective
    input parameter names. Returns {} when no input spec is available.
    '''
    input_spec = None
    if 'DX_JOB_ID' in environ:
        # works in the cloud, not locally
        job_desc = dxpy.describe(dxpy.JOB_ID)
        if job_desc["function"] == "main":
            # The input spec does not apply for subjobs
            desc = dxpy.describe(job_desc.get("app", job_desc.get("applet")))
            if "inputSpec" in desc:
                input_spec = desc["inputSpec"]
    elif 'DX_TEST_DXAPP_JSON' in environ:
        # works only locally
        path_to_dxapp_json = environ['DX_TEST_DXAPP_JSON']
        with open(path_to_dxapp_json) as fd:
            dxapp_json = json.load(fd)
            input_spec = dxapp_json.get('inputSpec')
    # convert to a dictionary. Each entry in the input spec
    # has {name, class} attributes.
    if input_spec is None:
        return {}
    # For each field name, return its patterns.
    # Make sure a pattern is legal, ignore illegal patterns.
    def is_legal_pattern(pattern):
        # Only glob patterns containing '*' are recognized downstream.
        return "*" in pattern
    patterns_dict = {}
    for spec in input_spec:
        name = spec['name']
        if 'patterns' in spec:
            patterns_dict[name] = []
            for p in spec['patterns']:
                if is_legal_pattern(p):
                    patterns_dict[name].append(p)
    return patterns_dict
# return the shorter string between p and q
def choose_shorter_string(p, q):
    """Return the shorter of *p* and *q*; a None argument yields the other.

    Ties are resolved in favor of *p*.
    """
    if p is None or q is None:
        return p if q is None else q
    return q if len(q) < len(p) else p
def analyze_bash_vars(job_input_file, job_homedir):
    '''
    This function examines the input file, and calculates variables to
    instantiate in the shell environment. It is called right before starting the
    execution of an app in a worker.

    For each input key, we want to have
    $var
    $var_filename
    $var_prefix
       remove last dot (+gz), and/or remove patterns
    $var_path
       $HOME/in/var/$var_filename

    For example,
    $HOME/in/genes/A.txt
                   B.txt

    export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}')
    export genes_filename=("A.txt" "B.txt")
    export genes_prefix=("A" "B")
    export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt")

    If there are patterns defined in the input spec, then the prefix respects them.
    Here are several examples, where the patterns are:
        *.bam, *.bwa-index.tar.gz, foo*.sam, z*ra.sam

    file name                prefix    matches
    foo.zed.bam              foo.zed   *.bam
    xxx.bwa-index.tar.gz     xxx       *.bwa-index.tar.gz
    food.sam                 food      foo*.sam
    zebra.sam                zebra     z*ra.sam
    xx.c                     xx
    xx.c.gz                  xx

    The only patterns we recognize are of the form x*.y. For example:
      legal    *.sam, *.c.py, foo*.sam, a*b*c.baz
      ignored  uu.txt x???.tar mon[a-z].py
    '''
    _, file_entries, rest_hash = get_job_input_filenames(job_input_file)
    patterns_dict = get_input_spec_patterns()
    # Note: there may be multiple matches, choose the shortest prefix.
    def get_prefix(basename, key):
        best_prefix = None
        patterns = patterns_dict.get(key)
        if patterns is not None:
            for pattern in patterns:
                if fnmatch.fnmatch(basename, pattern):
                    # Strip the fixed suffix after the last '*' from the
                    # basename; e.g. pattern '*.bam' removes '.bam'.
                    _, _, right_piece = pattern.rpartition("*")
                    best_prefix = choose_shorter_string(best_prefix, basename[:-len(right_piece)])
        if best_prefix is not None:
            return best_prefix
        else:
            # no matching rule
            parts = os.path.splitext(basename)
            if parts[1] == ".gz":
                # Drop a trailing '.gz' plus the real extension below it.
                parts = os.path.splitext(parts[0])
            return parts[0]
    def factory():
        # Per-key accumulator: parallel lists for each derived variable.
        return {'handler': [], 'basename': [], 'prefix': [], 'path': []}
    file_key_descs = collections.defaultdict(factory)
    rel_home_dir = get_input_dir(job_homedir)
    for key, entries in list(file_entries.items()):
        for entry in entries:
            filename = entry['trg_fname']
            basename = os.path.basename(filename)
            prefix = get_prefix(basename, key)
            k_desc = file_key_descs[key]
            k_desc['handler'].append(entry['handler'])
            k_desc['basename'].append(basename)
            k_desc['prefix'].append(prefix)
            k_desc['path'].append(os.path.join(rel_home_dir, filename))
    return file_key_descs, rest_hash
#
# Note: pipes.quote() to be replaced with shlex.quote() in Python 3
# (see http://docs.python.org/2/library/pipes.html#pipes.quote)
#
def gen_bash_vars(job_input_file, job_homedir=None, check_name_collision=True):
    """
    :param job_input_file: path to a JSON file describing the job inputs
    :param job_homedir: path to home directory, used for testing purposes
    :param check_name_collision: should we check for name collisions?
    :return: list of lines
    :rtype: list of strings

    Calculates a line for each shell variable to instantiate.
    If *check_name_collision* is true, then detect and warn about
    collisions with essential environment variables.
    """
    file_key_descs, rest_hash = analyze_bash_vars(job_input_file, job_homedir)
    def string_of_elem(elem):
        # Render one value as a shell-safe quoted token. NOTE(review):
        # 'basestring' is Python-2 only; this module targets Python 2.
        result = None
        if isinstance(elem, basestring):
            result = elem
        elif isinstance(elem, dxpy.DXFile):
            # Serialize a file handler back into its dxlink JSON form.
            result = json.dumps(dxpy.dxlink(elem))
        else:
            result = json.dumps(elem)
        return pipes.quote(result)
    def string_of_value(val):
        # Lists become bash arrays; scalars a single quoted token.
        if isinstance(val, list):
            string = " ".join([string_of_elem(vitem) for vitem in val])
            return "( {} )".format(string)
        else:
            return string_of_elem(val)
    var_defs_hash = {}
    def gen_text_line_and_name_collision(key, val):
        ''' In the absence of a name collision, create a line describing a bash variable.
        '''
        if check_name_collision:
            # Refuse to shadow existing environment variables or an
            # earlier definition from this run; warn instead.
            if key not in environ and key not in var_defs_hash:
                var_defs_hash[key] = val
            else:
                sys.stderr.write(dxpy.utils.printing.fill(
                    "Creating environment variable ({}) would cause a name collision".format(key)
                ) + "\n")
        else:
            var_defs_hash[key] = val
    # Processing non-file variables before the file variables. This prioritizes them,
    # so that in case of name collisions, the file-variables will be dropped.
    for key, desc in list(rest_hash.items()):
        gen_text_line_and_name_collision(key, string_of_value(desc))
    for file_key, desc in list(file_key_descs.items()):
        gen_text_line_and_name_collision(file_key, string_of_value(desc['handler']))
        gen_text_line_and_name_collision(file_key + "_name", string_of_value(desc['basename']))
        gen_text_line_and_name_collision(file_key + "_prefix", string_of_value(desc['prefix']))
        gen_text_line_and_name_collision(file_key + "_path", string_of_value(desc['path']))
    return var_defs_hash
|
dnanexus/dx-toolkit
|
src/python/dxpy/utils/file_load_utils.py
|
Python
|
apache-2.0
| 15,802
|
[
"BWA"
] |
0ee115e42bfa913cb8d06302e9cd8a6fb4e7391b29cb9295933b4c99d0f8ed5d
|
# nsdf.py ---
#
# Filename: nsdf.py
# Description:
# Author: subha
# Maintainer:
# Created: Fri Jun 26 12:23:07 2015 (-0400)
# Version:
# Last-Updated: Tue Dec 29 12:50:27 2015 (-0500)
# By: Subhasis Ray
# Update #: 6
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
# __BROKEN__
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""
NSDF : Neuroscience Simulation Data Format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
NSDF is an HDF5 based format for storing data from neuroscience
simulation.
This script is for demonstrating the use of NSDFWriter class to
dump data in NSDF format.
The present implementation of NSDFWriter puts all value fields
connected to its requestData into
/data/uniform/{className}/{fieldName} 2D dataset - each row
corresponding to one object.
Event data are stored in
/data/event/{className}/{fieldName}/{Id}_{dataIndex}_{fieldIndex}
where the last component is the string representation of the ObjId of
the source.
The model tree (starting below root element) is saved as a tree of
groups under /model/modeltree (one could easily add the fields as
attributes with a little bit more code).
The mapping between data source and uniformly sampled data is stored
as a dimension scale in /map/uniform/{className}/{fieldName}. That for
event data is stored as a compound dataset in
/map/event/{className}/{fieldName} with a [source, data] columns.
The start and end timestamps of the simulation are saved as file
attributes: C/C++ time functions have this limitation that they give
resolution up to a second, this means for simulation lasting < 1 s the
two timestamps may be identical.
Much of the environment specification is set as HDF5 attributes (which
is a generic feature from HDF5WriterBase).
MOOSE is unit agnostic at present so unit specification is not
implemented in NSDFWriter. But units can be easily added as dataset
attribute if desired as shown in this example.
References:
Ray, Chintaluri, Bhalla and Wojcik. NSDF: Neuroscience Simulation Data
Format, Neuroinformatics, 2015.
http://nsdf.readthedocs.org/en/latest/
"""
import numpy as np
from datetime import datetime
import getpass
import moose
# Probe for NSDFWriter support at import time: not every MOOSE build is
# compiled with NSDF/HDF5 support, in which case we bail out early.
try:
    nsdf = moose.NSDFWriter('/test')
except AttributeError as e:
    print( "[INFO ] This build is not built with NSDFWriter." )
    print( '\tPlease see https://github.com/BhallaLab/moose-core' )
    quit()
def setup_model():
    """
    Setup a dummy model with a PulseGen and a SpikeGen. The SpikeGen
    detects the leading edges of the pulses created by the PulseGen
    and sends out the event times. We record the PulseGen outputValue
    as Uniform data and leading edge time as Event data in the NSDF
    file.

    Side effects: builds a MOOSE model under /model, runs the
    simulation, writes 'nsdf_demo.h5' and 'nsdf.txt' to the current
    directory, and prints progress messages.
    """
    simtime = 100.0  # total simulated time
    dt = 1e-3        # tick step applied to all clocks below
    model = moose.Neutral('/model')
    pulse = moose.PulseGen('/model/pulse')
    pulse.level[0] = 1.0
    pulse.delay[0] = 10
    pulse.width[0] = 20
    t_lead = moose.SpikeGen('/model/t_lead')
    t_lead.threshold = 0.5
    # SpikeGen fires on the rising edge of the pulse crossing threshold.
    moose.connect(pulse, 'output', t_lead,'Vm');
    nsdf = moose.NSDFWriter('/model/writer')
    nsdf.filename = 'nsdf_demo.h5'
    nsdf.mode = 2 #overwrite existing file
    nsdf.flushLimit = 100
    # Uniform data: sample the pulse output every recording step.
    moose.connect(nsdf, 'requestOut', pulse, 'getOutputValue')
    print(('event input', nsdf.eventInput, nsdf.eventInput.num))
    print(nsdf)
    # Event data: one eventInput slot per event source.
    nsdf.eventInput.num = 1
    ei = nsdf.eventInput[0]
    print((ei.path))
    moose.connect(t_lead, 'spikeOut', nsdf.eventInput[0], 'input')
    tab = moose.Table('spiketab')
    tab.threshold = t_lead.threshold
    clock = moose.element('/clock')
    for ii in range(32):
        moose.setClock(ii, dt)
    moose.connect(pulse, 'output', tab, 'spike')
    print(('Starting simulation at:', datetime.now().isoformat()))
    moose.reinit()
    moose.start(simtime)
    print(('Finished simulation at:', datetime.now().isoformat()))
    # Keep a plain-text copy of the spike table for quick inspection.
    np.savetxt('nsdf.txt', tab.vector)
    ###################################
    # Set the environment attributes
    ###################################
    nsdf.stringAttr['title'] = 'NSDF writing demo for moose'
    nsdf.stringAttr['description'] = '''An example of writing data to NSDF file from MOOSE simulation. In
this simulation we generate square pules from a PulseGen object and
use a SpikeGen to detect the threshold crossing events of rising
edges. We store the pulsegen output as Uniform data and the threshold
crossing times as Event data. '''
    nsdf.stringAttr['creator'] = getpass.getuser()
    nsdf.stringVecAttr['software'] = ['python2.7', 'moose3' ]
    nsdf.stringVecAttr['method'] = ['']
    nsdf.stringAttr['rights'] = ''
    nsdf.stringAttr['license'] = 'CC-BY-NC'
    # Specify units. MOOSE is unit agnostic, so we explicitly set the
    # unit attibutes on individual datasets
    nsdf.stringAttr['/data/uniform/PulseGen/outputValue/tunit'] = 's'
    nsdf.stringAttr['/data/uniform/PulseGen/outputValue/unit'] = 'A'
    eventDataPath = '/data/event/SpikeGen/spikeOut/{}_{}_{}/unit'.format(t_lead.vec.value,
                                                                         t_lead.getDataIndex(),
                                                                         t_lead.fieldIndex)
    nsdf.stringAttr[eventDataPath] = 's'
def main():
    """Entry point: build the demo model and run the NSDF recording."""
    setup_model()

if __name__ == '__main__':
    main()
#
# nsdf.py ends here
|
BhallaLab/moose-examples
|
snippets/nsdf.py
|
Python
|
gpl-2.0
| 6,010
|
[
"MOOSE"
] |
87ab7c0b314fea9f976d3789f8faa1ad6fb394c01093f18124e67469f6f1d6ec
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides support for defining keybindings and matching them to input
events."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
from gi.repository import Gdk
import functools
import pyatspi
from . import debug
from . import settings
from .orca_i18n import _
# Caches for keysym-string -> value lookups (populated lazily).
_keysymsCache = {}
_keycodeCache = {}

# Bit position of the Orca modifier in the AT-SPI modifier bitfield.
MODIFIER_ORCA = 8

# Pre-built modifier bitmasks for the common key-binding combinations.
NO_MODIFIER_MASK = 0
ALT_MODIFIER_MASK = 1 << pyatspi.MODIFIER_ALT
CTRL_MODIFIER_MASK = 1 << pyatspi.MODIFIER_CONTROL
ORCA_MODIFIER_MASK = 1 << MODIFIER_ORCA
ORCA_ALT_MODIFIER_MASK = (1 << MODIFIER_ORCA |
                          1 << pyatspi.MODIFIER_ALT)
ORCA_CTRL_MODIFIER_MASK = (1 << MODIFIER_ORCA |
                           1 << pyatspi.MODIFIER_CONTROL)
ORCA_CTRL_ALT_MODIFIER_MASK = (1 << MODIFIER_ORCA |
                               1 << pyatspi.MODIFIER_CONTROL |
                               1 << pyatspi.MODIFIER_ALT)
ORCA_SHIFT_MODIFIER_MASK = (1 << MODIFIER_ORCA |
                            1 << pyatspi.MODIFIER_SHIFT)
SHIFT_MODIFIER_MASK = 1 << pyatspi.MODIFIER_SHIFT
SHIFT_ALT_MODIFIER_MASK = (1 << pyatspi.MODIFIER_SHIFT |
                           1 << pyatspi.MODIFIER_ALT)
CTRL_ALT_MODIFIER_MASK = (1 << pyatspi.MODIFIER_CONTROL |
                          1 << pyatspi.MODIFIER_ALT)
COMMAND_MODIFIER_MASK = (1 << pyatspi.MODIFIER_ALT |
                         1 << pyatspi.MODIFIER_CONTROL |
                         1 << pyatspi.MODIFIER_META2 |
                         1 << pyatspi.MODIFIER_META3)
# All modifiers that are held, not toggled (i.e. excludes the lock keys).
NON_LOCKING_MODIFIER_MASK = (1 << pyatspi.MODIFIER_SHIFT |
                             1 << pyatspi.MODIFIER_ALT |
                             1 << pyatspi.MODIFIER_CONTROL |
                             1 << pyatspi.MODIFIER_META2 |
                             1 << pyatspi.MODIFIER_META3 |
                             1 << MODIFIER_ORCA)

# Default mask used when a binding does not specify its own.
defaultModifierMask = NON_LOCKING_MODIFIER_MASK
def getKeycode(keysym):
    """Converts an XKeysym string (e.g., 'KP_Enter') to a keycode that
    should match the event.hw_code for key events.

    This whole situation is caused by the fact that Solaris chooses
    to give us different keycodes for the same key, and the keypad
    is the primary place where this happens: if NumLock is not on,
    there is no telling the difference between keypad keys and the
    other navigation keys (e.g., arrows, page up/down, etc.). One,
    for example, would expect to get KP_End for the '1' key on the
    keypad if NumLock were not on. Instead, we get 'End' and the
    keycode for it matches the keycode for the other 'End' key. Odd.
    If NumLock is on, we at least get KP_* keys.

    So...when setting up keybindings, we say we're interested in
    KeySyms, but those keysyms are carefully chosen so as to result
    in a keycode that matches the actual key on the keyboard. This
    is why we use KP_1 instead of KP_End and so on in our keybindings.

    Arguments:
    - keysym: a string that is a valid representation of an XKeysym.

    Returns an integer representing a key code that should match the
    event.hw_code for key events.
    """
    if not keysym:
        return 0
    # Results are memoized in _keycodeCache; only the first lookup per
    # keysym touches Gdk.
    if keysym not in _keycodeCache:
        keymap = Gdk.Keymap.get_default()
        # Find the numerical value of the keysym
        #
        keyval = Gdk.keyval_from_name(keysym)
        if keyval == 0:
            # Unknown keysym name: report 0 without caching it.
            return 0
        # Now find the keycodes for the keysym.   Since a keysym can
        # be associated with more than one key, we'll shoot for the
        # keysym that's in group 0, regardless of shift level (each
        # entry is of the form [keycode, group, level]).
        #
        _keycodeCache[keysym] = 0
        success, entries = keymap.get_entries_for_keyval(keyval)
        for entry in entries:
            if entry.group == 0:
                _keycodeCache[keysym] = entry.keycode
                break
        if _keycodeCache[keysym] == 0:
            # No group-0 entry: fall back to the first keycode found.
            _keycodeCache[keysym] = entries[0].keycode
        #print keysym, keyval, entries, _keycodeCache[keysym]
    return _keycodeCache[keysym]
def getModifierNames(mods):
    """Gets the modifier names of a numeric modifier mask as a human
    consumable string.

    Arguments:
    - mods: integer modifier bitmask (see pyatspi.MODIFIER_*).

    Returns a string such as "Ctrl+Shift+" -- each recognized modifier
    name followed by a '+', concatenated in a fixed order.
    """
    text = ""
    if mods & ORCA_MODIFIER_MASK:
        # The Orca modifier is Insert on desktop layouts, CapsLock on
        # laptop layouts.
        if settings.keyboardLayout == settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
            # Translators: this is presented in a GUI to represent the
            # "insert" key when used as the Orca modifier.
            text += _("Insert") + "+"
        else:
            # Translators: this is presented in a GUI to represent the
            # "caps lock" modifier.
            text += _("Caps_Lock") + "+"
    elif mods & (1 << pyatspi.MODIFIER_SHIFTLOCK):
        # Translators: this is presented in a GUI to represent the
        # "caps lock" modifier.
        #
        text += _("Caps_Lock") + "+"
    #if mods & (1 << pyatspi.MODIFIER_NUMLOCK):
    #    text += _("Num_Lock") + "+"
    if mods & 128:
        # Translators: this is presented in a GUI to represent the
        # "right alt" modifier.
        #
        text += _("Alt_R") + "+"
    if mods & (1 << pyatspi.MODIFIER_META3):
        # Translators: this is presented in a GUI to represent the
        # "super" modifier.
        #
        text += _("Super") + "+"
    if mods & (1 << pyatspi.MODIFIER_META2):
        # Translators: this is presented in a GUI to represent the
        # "meta 2" modifier.
        #
        text += _("Meta2") + "+"
    #if mods & (1 << pyatspi.MODIFIER_META):
    #    text += _("Meta") + "+"
    if mods & ALT_MODIFIER_MASK:
        # Translators: this is presented in a GUI to represent the
        # "left alt" modifier.
        #
        text += _("Alt_L") + "+"
    if mods & CTRL_MODIFIER_MASK:
        # Translators: this is presented in a GUI to represent the
        # "control" modifier.
        #
        text += _("Ctrl") + "+"
    if mods & SHIFT_MODIFIER_MASK:
        # Translators: this is presented in a GUI to represent the
        # "shift " modifier.
        #
        text += _("Shift") + "+"
    return text
def getClickCountString(count):
    """Returns a human-consumable string representing the number of
    clicks, such as 'double click' and 'triple click'."""
    # Translators: Orca keybindings support double
    # and triple "clicks" or key presses, similar to
    # using a mouse.
    #
    if count == 3:
        return _("triple click")
    return _("double click") if count == 2 else ""
class KeyBinding:
    """A single key binding, consisting of a keycode, a modifier mask,
    and the InputEventHandler.
    """

    def __init__(self, keysymstring, modifier_mask, modifiers, handler,
                 click_count = 1):
        """Creates a new key binding.

        Arguments:
        - keysymstring: the keysymstring - this is typically a string
          from /usr/include/X11/keysymdef.h with the preceding 'XK_'
          removed (e.g., XK_KP_Enter becomes the string 'KP_Enter').
        - modifier_mask: bit mask where a set bit tells us what modifiers
          we care about (see pyatspi.MODIFIER_*)
        - modifiers: the state the modifiers we care about must be in for
          this key binding to match an input event (see also
          pyatspi.MODIFIER_*)
        - handler: the InputEventHandler for this key binding
        - click_count: number of presses required to trigger (default 1)
        """
        self.keysymstring = keysymstring
        self.modifier_mask = modifier_mask
        self.modifiers = modifiers
        self.handler = handler
        self.click_count = click_count
        # Resolved lazily in matches(); None until first use.
        self.keycode = None

    def matches(self, keycode, modifiers):
        """Returns true if this key binding matches the given keycode and
        modifier state.
        """
        # We lazily bind the keycode.  The primary reason for doing this
        # is so that atspi does not have to be initialized before setting
        # keybindings in the user's preferences file.
        #
        if not self.keycode:
            self.keycode = getKeycode(self.keysymstring)
        if self.keycode == keycode:
            # Compare only the modifier bits this binding cares about.
            result = modifiers & self.modifier_mask
            return result == self.modifiers
        else:
            return False

    def description(self):
        """Returns the description of this binding's functionality."""
        try:
            return self.handler.description
        except:
            # Best-effort: a handler without a description yields ''.
            return ''

    def asString(self):
        """Returns a more human-consumable string representing this binding."""
        mods = getModifierNames(self.modifiers)
        clickCount = getClickCountString(self.click_count)
        keysym = self.keysymstring
        string = '%s%s %s' % (mods, keysym, clickCount)
        return string.strip()
class KeyBindings:
    """Structure that maintains a set of KeyBinding instances.
    """

    def __init__(self):
        # Ordered list of KeyBinding instances.
        self.keyBindings = []

    def __str__(self):
        """Debug representation listing every binding's fields."""
        result = "[\n"
        for keyBinding in self.keyBindings:
            result += " [%x %x %s %d %s]\n" % \
                      (keyBinding.modifier_mask,
                       keyBinding.modifiers,
                       keyBinding.keysymstring,
                       keyBinding.click_count,
                       keyBinding.handler.description)
        result += "]"
        return result

    def add(self, keyBinding):
        """Adds the given KeyBinding instance to this set of keybindings.
        """
        self.keyBindings.append(keyBinding)

    def remove(self, keyBinding):
        """Removes the given KeyBinding instance from this set of keybindings.
        """
        # Best-effort removal: a binding not present is silently ignored.
        try:
            i = self.keyBindings.index(keyBinding)
        except:
            pass
        else:
            del self.keyBindings[i]

    def removeByHandler(self, handler):
        """Removes all KeyBinding instances associated with *handler*
        from this set of keybindings.
        """
        # Iterate backwards so deletions do not shift unvisited indices.
        i = len(self.keyBindings)
        while i > 0:
            if self.keyBindings[i - 1].handler == handler:
                del self.keyBindings[i - 1]
            i = i - 1

    def hasKeyBinding (self, newKeyBinding, typeOfSearch="strict"):
        """Return True if keyBinding is already in self.keyBindings.

           The typeOfSearch can be:
              "strict":      matches description, modifiers, key, and
                             click count
              "description": matches only description.
              "keys":        matches the modifiers, key, and modifier mask,
                             and click count
              "keysNoMask":  matches the modifiers, key, and click count
        """
        hasIt = False
        for keyBinding in self.keyBindings:
            if typeOfSearch == "strict":
                if (keyBinding.handler.description \
                    == newKeyBinding.handler.description) \
                    and (keyBinding.keysymstring \
                         == newKeyBinding.keysymstring) \
                    and (keyBinding.modifier_mask \
                         == newKeyBinding.modifier_mask) \
                    and (keyBinding.modifiers \
                         == newKeyBinding.modifiers) \
                    and (keyBinding.click_count \
                         == newKeyBinding.click_count):
                    hasIt = True
            elif typeOfSearch == "description":
                if keyBinding.handler.description \
                    == newKeyBinding.handler.description:
                    hasIt = True
            elif typeOfSearch == "keys":
                if (keyBinding.keysymstring \
                    == newKeyBinding.keysymstring) \
                    and (keyBinding.modifier_mask \
                         == newKeyBinding.modifier_mask) \
                    and (keyBinding.modifiers \
                         == newKeyBinding.modifiers) \
                    and (keyBinding.click_count \
                         == newKeyBinding.click_count):
                    hasIt = True
            elif typeOfSearch == "keysNoMask":
                if (keyBinding.keysymstring \
                    == newKeyBinding.keysymstring) \
                    and (keyBinding.modifiers \
                         == newKeyBinding.modifiers) \
                    and (keyBinding.click_count \
                         == newKeyBinding.click_count):
                    hasIt = True
        return hasIt

    def getBoundBindings(self, uniqueOnly=False):
        """Returns the KeyBinding instances which are bound to a keystroke.

        Arguments:
        - uniqueOnly: Should alternative bindings for the same handler be
          filtered out (default: False)
        """
        bound = [kb for kb in self.keyBindings if kb.keysymstring]
        if uniqueOnly:
            # Keep only the first binding per distinct handler description.
            handlers = [kb.handler.description for kb in bound]
            bound = [bound[i] for i in map(handlers.index, set(handlers))]
        return bound

    def getBindingsForHandler(self, handler):
        """Returns the KeyBinding instances associated with handler."""
        return [kb for kb in self.keyBindings if kb.handler == handler]

    def getInputHandler(self, keyboardEvent):
        """Returns the input handler of the key binding that matches the
        given keycode and modifiers, or None if no match exists.
        """
        candidates = []
        clickCount = keyboardEvent.getClickCount()
        for keyBinding in self.keyBindings:
            if keyBinding.matches(keyboardEvent.hw_code,
                                  keyboardEvent.modifiers):
                # Exact match on mask and click count wins immediately.
                if keyBinding.modifier_mask == keyboardEvent.modifiers and \
                   keyBinding.click_count == clickCount:
                    return keyBinding.handler
                # If there's no keysymstring, it's unbound and cannot be
                # a match.
                #
                if keyBinding.keysymstring:
                    candidates.append(keyBinding)
        # With NumLock on, keypad keys should type digits, not trigger
        # bindings.
        if keyboardEvent.modifiers & (1 << pyatspi.MODIFIER_NUMLOCK) \
           and keyboardEvent.keyval_name.startswith("KP"):
            return None
        # If we're still here, we don't have an exact match. Prefer
        # the one whose click count is closest to, but does not exceed,
        # the actual click count.
        #
        comparison = lambda x, y: y.click_count - x.click_count
        candidates.sort(key=functools.cmp_to_key(comparison))
        for candidate in candidates:
            if candidate.click_count <= clickCount:
                return candidate.handler
        return None

    def consumeKeyboardEvent(self, script, keyboardEvent):
        """Attempts to consume the given keyboard event.  If these
        keybindings have a handler for the given keyboardEvent, it is
        assumed the event will always be consumed.
        """
        consumed = False
        handler = self.getInputHandler(keyboardEvent)
        if handler:
            consumed = True
            # Only fire the handler on key press; release is consumed
            # but not dispatched.
            if keyboardEvent.type == pyatspi.KEY_PRESSED_EVENT:
                try:
                    handler.processInputEvent(script, keyboardEvent)
                except:
                    debug.printException(debug.LEVEL_SEVERE)
        return consumed

    def load(self, keymap, handlers):
        """ Takes the keymappings and tries to find a matching named
            function in handlers.

            keymap is a list of lists, each list contains 5 elements
            If addUnbound is set to true, then at the end of loading all the
            keybindings, any remaining functions will be unbound.
        """
        for i in keymap:
            keysymstring = i[0]
            modifierMask = i[1]
            modifiers = i[2]
            handler = i[3]
            # The fifth element (click count) is optional; default 1.
            try:
                clickCount = i[4]
            except:
                clickCount = 1
            if handler in handlers:
                # add the keybinding
                self.add(KeyBinding( \
                    keysymstring, modifierMask, modifiers, \
                    handlers[handler], clickCount))
            else:
                debug.println(debug.LEVEL_WARNING, \
                    "WARNING: could not find %s handler to associate " \
                    "with keybinding." % handler)
|
chrys87/orca-beep
|
src/orca/keybindings.py
|
Python
|
lgpl-2.1
| 17,431
|
[
"ORCA"
] |
48815ba6c7f6bc43650cf9f4274859658543f272e709a80f5b2f65f24744f348
|
###########################################################################
# (C) 2016 Elettra - Sincrotrone Trieste S.C.p.A.. All rights reserved. #
# #
# #
# This file is part of STP-Core, the Python core of SYRMEP Tomo Project, #
# a software tool for the reconstruction of experimental CT datasets. #
# #
# STP-Core is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# STP-Core is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License #
# for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with STP-Core. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
#
# Author: Francesco Brun
# Last modified: Sept, 28th 2016
#
# python:
from sys import argv, exit
from os import remove, sep, linesep, listdir
from os.path import exists, dirname, basename, splitext
from numpy import array, finfo, copy, float32, double, amin, amax, tile, concatenate, asarray, isscalar, pi
from numpy import empty, reshape, log as nplog, arange, squeeze, fromfile, ndarray, where, meshgrid, roll
from time import time
from multiprocessing import Process, Array
from scipy.misc import imresize #scipy 0.12
# pystp-specific:
from preprocess.extfov_correction import extfov_correction
from preprocess.flat_fielding import flat_fielding
from preprocess.dynamic_flatfielding import dff_prepare_plan, dynamic_flat_fielding
from preprocess.ring_correction import ring_correction
from preprocess.extract_flatdark import extract_flatdark, _medianize
from phaseretrieval.tiehom2020 import tiehom2020, tiehom_plan2020
from phaseretrieval.tiehom import tiehom, tiehom_plan
from phaseretrieval.phrt import phrt, phrt_plan
from reconstruct.rec_astra import recon_astra_fbp, recon_astra_iterative
from reconstruct.rec_fista_tv import recon_fista_tv
from reconstruct.rec_mr_fbp import recon_mr_fbp
from reconstruct.rec_gridrec import recon_gridrec
#from reconstruct.rec_sirt_fbp import recon_sirt_fbp
from postprocess.polarfilter import polarfilter
from postprocess.croprescale import croprescale
from utils.padding import upperPowerOfTwo, padImage, padSmoothWidth
from utils.caching import cache2plan, plan2cache
from tifffile import imread, imsave
from h5py import File as getHDF5
import stpio.tdf as tdf
def reconstruct(im, angles, offset, logtransform, recpar, circle, scale, pad, method,
                zerone_mode, dset_min, dset_max, corr_offset, rolling, roll_shift, tmppath):
    """Reconstruct a sinogram with the specified reconstruction method (or algorithm).

    Parameters
    ----------
    im1 : array_like
        Sinogram image data as numpy array.
    center : float
        Offset of the center of rotation to use for the tomographic
        reconstruction with respect to the half of sinogram width
        (default=0, i.e. half width).
    logtransform : boolean
        Apply logarithmic transformation before reconstruction (default=True).
    filter : string
        Filter to apply before the application of the reconstruction algorithm. Filter
        types are: ram-lak, shepp-logan, cosine, hamming, hann, tukey, lanczos, triangular,
        gaussian, barlett-hann, blackman, nuttall, blackman-harris, blackman-nuttall,
        flat-top, kaiser, parzen.
    circle : boolean
        Create a circle in the reconstructed image and set to zero pixels outside the
        circle (default=False).
    """
    # Upscale projections (if required):
    if (abs(scale - 1.0) > finfo(float32).eps):
        siz_orig1 = im.shape[1]
        im = imresize(im, (im.shape[0], int(round(scale * im.shape[1]))), interp='bicubic', mode='F')
        offset = int(offset * scale)
    # Apply transformation for changes in the center of rotation:
    # (only for methods without native center-offset support)
    if ( (method == 'GRIDREC') or (method == 'MR-FBP_CUDA') or (method == 'FISTA-TV_CUDA') ):
        offset = int(round(offset))
        if (offset != 0):
            if (offset >= 0):
                im = im[:,:-offset]
                tmp = im[:,0] # Get first column
                tmp = tile(tmp, (offset,1)) # Replicate the first column the right number of times
                im = concatenate((tmp.T,im), axis=1) # Concatenate tmp before the image
            else:
                im = im[:,abs(offset):]
                tmp = im[:,im.shape[1] - 1] # Get last column
                tmp = tile(tmp, (abs(offset),1)) # Replicate the last column the right number of times
                im = concatenate((im,tmp.T), axis=1) # Concatenate tmp after the image
    # Sinogram rolling (if required).  It doesn't make sense in limited angle tomography, so check if 180 or 360:
    if ((rolling == True) and (roll_shift > 0)):
        if ( (angles - pi) < finfo(float32).eps ):
            # Flip the last rows:
            im[-roll_shift:,:] = im[-roll_shift:,::-1]
            # Now roll the sinogram:
            im = roll(im, roll_shift, axis=0)
        elif ((angles - pi*2.0) < finfo(float32).eps):
            # Only roll the sinogram:
            im = roll(im, roll_shift, axis=0)
    # Scale image to [0,1] range (if required):
    if (zerone_mode):
        im = (im - dset_min) / (dset_max - dset_min)
        # Cheating the whole process:
        #im = (im - numpy.amin(im[:])) / (numpy.amax(im[:]) - numpy.amin(im[:]))
    # Apply log transform:
    # (clamp to eps first so the log is always defined)
    im[im <= finfo(float32).eps] = finfo(float32).eps
    if (logtransform == True):
        im = -nplog(im + corr_offset)
    # Replicate pad image to double the width:
    if (pad):
        dim_o = im.shape[1]
        # NOTE(review): '/' here relies on Python 2 integer division;
        # under Python 3 n_pad/marg become floats and break slicing --
        # confirm the target interpreter (scipy.misc.imresize suggests Py2).
        n_pad = im.shape[1] + im.shape[1] / 2
        marg = (n_pad - dim_o) / 2
        # Pad image:
        if (method == 'GRIDREC'):
            im = padSmoothWidth(im, n_pad)
        else:
            im = padImage(im, im.shape[0], n_pad)
    # Perform the actual reconstruction:
    if (method.startswith('FBP')):
        im = recon_astra_fbp(im, angles, method, recpar, offset)
    elif (method == 'MR-FBP_CUDA'):
        im = recon_mr_fbp(im, angles, offset)
    elif (method == 'FISTA-TV_CUDA'):
        # recpar packs "lambda:fgp_iterations:tv_iterations".
        lam, fgpiter, tviter = recpar.split(":")
        lam = float32(lam)
        fgpiter = int(fgpiter)
        tviter = int(tviter)
        im = recon_fista_tv(im, angles, lam, fgpiter, tviter, offset)
    #elif (method == 'SIRT-FBP_CUDA'):
    #	# Translate from STP to ASTRA implementation:
    #	im = recon_sirt_fbp(im, angles, int(recpar), tmppath )
    #	# Clean SIRT-FBP cache:
    #
    elif (method == 'GRIDREC'):
        # GRIDREC reconstructs two sinograms at once; the same sinogram
        # is passed twice here and one result kept.
        [im, im] = recon_gridrec(im, im, angles, recpar)
    else:
        im = recon_astra_iterative(im, angles, method, recpar, zerone_mode, offset)
    # Crop:
    if (pad):
        im = im[marg:dim_o + marg, marg:dim_o + marg]
    # Resize (if necessary):
    if (abs(scale - 1.0) > finfo(float32).eps):
        im = imresize(im, (siz_orig1, siz_orig1), interp='nearest', mode='F')
    # Return output:
    return im.astype(float32)
#def _testwritedownsino(tmp_im):
# for ct in range(0, tmp_im.shape[0]):
# a = tmp_im[ct,:,:].squeeze()
# fname = 'C:\\Temp\\StupidFolder\\sino_' + str(ct).zfill(4) + '.tif'
# imsave(fname, a.astype(float32))
#def _testwritedownproj(tmp_im):
# for ct in range(0, tmp_im.shape[1]):
# a = tmp_im[:,ct,:].squeeze()
# fname = 'C:\\Temp\\StupidFolder\\proj_' + str(ct).zfill(4) + '.tif'
# imsave(fname, a.astype(float32))
def process(sino_idx, num_sinos, infile, outfile, preprocessing_required, corr_plan, skipflat, norm_sx, norm_dx, flat_end, half_half,
            half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, ext_fov_normalize, ext_fov_average,
            ringrem, phaseretrieval_required, phrtmethod, phrt_param1,
            phrt_param2, energy, distance, pixsize, phrtpad, approx_win, angles, angles_projfrom, angles_projto,
            offset, logtransform, recpar, circle, scale, pad, method, rolling, roll_shift,
            zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, postprocess_required,
            polarfilt_opt, convert_opt, crop_opt, dynamic_ff, EFF, filtEFF, im_dark, nr_threads, logfilename, tmppath):
    """Reconstruct one slice of the input TDF/HDF5 dataset and write it to disk.

    Reads the sinogram at sino_idx from infile, optionally applies on-the-fly
    pre-processing (flat fielding, extended-FOV correction, ring removal),
    optionally performs phase retrieval over a window of neighbouring
    sinograms, reconstructs the slice and writes the raw float32 image to a
    file whose name encodes the image shape and intensity range.

    NOTE(review): parameter semantics are inferred from usage in this file
    and should be confirmed against the STP pipeline callers.
    """
    # Perform reconstruction (on-the-fly preprocessing and phase retrieval, if
    # required):
    if (phaseretrieval_required):
        # Phase retrieval acts on projections, so a bunch of neighbouring
        # sinograms has to be loaded into memory. The first sinogram is
        # pre-processed to learn the final (cropped/decimated) dimensions.
        #
        # Open the TDF file and get the dataset (two possible layouts):
        f_in = getHDF5(infile, 'r')
        if "/tomo" in f_in:
            dset = f_in['tomo']
        else:
            dset = f_in['exchange/data']
        # Downscaling and decimation factors considered when determining the
        # approximation window:
        zrange = arange(sino_idx - approx_win * downsc_factor / 2, sino_idx + approx_win * downsc_factor / 2, downsc_factor)
        zrange = zrange[(zrange >= 0)]
        zrange = zrange[(zrange < num_sinos)]
        approx_win = zrange.shape[0]
        # Approximation window cannot be odd:
        if (approx_win % 2 == 1):
            approx_win = approx_win - 1
            zrange = zrange[0:approx_win]
        # Read one sinogram to get the proper dimensions:
        test_im = tdf.read_sino(dset, zrange[0]*downsc_factor).astype(float32)
        # Apply projection removal (if required):
        test_im = test_im[angles_projfrom:angles_projto, :]
        # Apply decimation and downscaling (if required):
        test_im = test_im[::decim_factor, ::downsc_factor]
        # Perform the pre-processing of the first sinogram to get the right
        # dimension:
        if (preprocessing_required):
            if not skipflat:
                if dynamic_ff:
                    # Dynamic flat fielding with downsampling = 2:
                    test_im = dynamic_flat_fielding(test_im, zrange[0] , EFF, filtEFF, 2, im_dark, norm_sx, norm_dx)
                else:
                    test_im = flat_fielding(test_im, zrange[0], corr_plan, flat_end, half_half,
                                            half_half_line / decim_factor, norm_sx, norm_dx).astype(float32)
            if ext_fov:
                test_im = extfov_correction(test_im, ext_fov_rot_right, ext_fov_overlap / downsc_factor, ext_fov_normalize, ext_fov_average).astype(float32)
            # Ring removal uses the flat plan only in the static flat-fielding case:
            if not skipflat and not dynamic_ff:
                test_im = ring_correction(test_im, ringrem, flat_end, corr_plan['skip_flat_after'], half_half,
                                          half_half_line / decim_factor, ext_fov).astype(float32)
            else:
                test_im = ring_correction(test_im, ringrem, False, False, half_half,
                                          half_half_line / decim_factor, ext_fov).astype(float32)
        # Now we can allocate memory for the bunch of slices:
        tmp_im = empty((approx_win, test_im.shape[0], test_im.shape[1]), dtype=float32)
        tmp_im[0,:,:] = test_im
        # Reading all the remaining sinos from the TDF file, then close it:
        for ct in range(1, approx_win):
            # Read the sinogram:
            test_im = tdf.read_sino(dset, zrange[ct]*downsc_factor).astype(float32)
            # Apply projection removal (if required):
            test_im = test_im[angles_projfrom:angles_projto, :]
            # Apply decimation and downscaling (if required):
            test_im = test_im[::decim_factor, ::downsc_factor]
            # Perform the pre-processing for each sinogram of the bunch:
            if (preprocessing_required):
                if not skipflat:
                    if dynamic_ff:
                        # Dynamic flat fielding with downsampling = 2:
                        test_im = dynamic_flat_fielding(test_im, zrange[ct], EFF, filtEFF, 2, im_dark, norm_sx, norm_dx)
                    else:
                        test_im = flat_fielding(test_im, zrange[ct], corr_plan, flat_end, half_half,
                                                half_half_line / decim_factor, norm_sx, norm_dx).astype(float32)
                if ext_fov:
                    test_im = extfov_correction(test_im, ext_fov_rot_right, ext_fov_overlap / downsc_factor, ext_fov_normalize, ext_fov_average).astype(float32)
                if not skipflat and not dynamic_ff:
                    test_im = ring_correction(test_im, ringrem, flat_end, corr_plan['skip_flat_after'], half_half,
                                              half_half_line / decim_factor, ext_fov).astype(float32)
                else:
                    test_im = ring_correction(test_im, ringrem, False, False, half_half,
                                              half_half_line / decim_factor, ext_fov).astype(float32)
            tmp_im[ct,:,:] = test_im
        f_in.close()
        # Now everything has to refer to a downscaled dataset:
        sino_idx = ((zrange == sino_idx).nonzero())
        #
        # Perform phase retrieval:
        #
        # Prepare the plan:
        if (phrtmethod == 0):
            # Paganin 2002:
            phrtplan = tiehom_plan(tmp_im[:,0,:], phrt_param1, phrt_param2, energy, distance, pixsize * downsc_factor, phrtpad)
        elif (phrtmethod == 1):
            # Paganin 2020:
            phrtplan = tiehom_plan2020(tmp_im[:,0,:], phrt_param1, phrt_param2, energy, distance, pixsize * downsc_factor, phrtpad)
        else:
            phrtplan = phrt_plan(tmp_im[:,0,:], energy, distance, pixsize * downsc_factor, phrt_param2, phrt_param1, phrtmethod, phrtpad)
        # Process each projection (whose height depends on the size of the bunch):
        for ct in range(0, tmp_im.shape[1]):
            if (phrtmethod == 0):
                tmp_im[:,ct,:] = tiehom(tmp_im[:,ct,:], phrtplan).astype(float32)
            elif (phrtmethod == 1):
                tmp_im[:,ct,:] = tiehom2020(tmp_im[:,ct,:], phrtplan).astype(float32)
            else:
                tmp_im[:,ct,:] = phrt(tmp_im[:,ct,:], phrtplan, phrtmethod).astype(float32)
        # Extract the requested sinogram:
        im = tmp_im[sino_idx[0],:,:].squeeze()
    else:
        # Read only one sinogram:
        f_in = getHDF5(infile, 'r')
        if "/tomo" in f_in:
            dset = f_in['tomo']
        else:
            dset = f_in['exchange/data']
        im = tdf.read_sino(dset,sino_idx * downsc_factor).astype(float32)
        f_in.close()
        # Apply projection removal (if required):
        im = im[angles_projfrom:angles_projto, :]
        # Apply decimation and downscaling (if required):
        im = im[::decim_factor,::downsc_factor]
        # Perform the preprocessing of the sinogram (if required):
        if (preprocessing_required):
            if not skipflat:
                if dynamic_ff:
                    # Dynamic flat fielding with downsampling = 2:
                    im = dynamic_flat_fielding(im, sino_idx, EFF, filtEFF, 2, im_dark, norm_sx, norm_dx)
                else:
                    im = flat_fielding(im, sino_idx, corr_plan, flat_end, half_half, half_half_line / decim_factor,
                                       norm_sx, norm_dx).astype(float32)
            if ext_fov:
                im = extfov_correction(im, ext_fov_rot_right, ext_fov_overlap / downsc_factor, ext_fov_normalize, ext_fov_average)
            if not skipflat and not dynamic_ff:
                im = ring_correction(im, ringrem, flat_end, corr_plan['skip_flat_after'], half_half,
                                     half_half_line / decim_factor, ext_fov)
            else:
                im = ring_correction(im, ringrem, False, False, half_half,
                                     half_half_line / decim_factor, ext_fov)
    # Actual reconstruction (offset is expressed in downscaled pixels):
    im = reconstruct(im, angles, offset / downsc_factor, logtransform, recpar, circle, scale, pad, method,
                     zerone_mode, dset_min, dset_max, corr_offset, rolling, roll_shift, tmppath).astype(float32)
    # Apply post-processing (if required):
    if postprocess_required:
        im = polarfilter(im, polarfilt_opt)
        im = croprescale(im, convert_opt, crop_opt)
    else:
        # Create the circle mask for fancy output:
        if (circle == True):
            siz = im.shape[1]
            if siz % 2:
                rang = arange(-siz / 2 + 1, siz / 2 + 1)
            else:
                rang = arange(-siz / 2,siz / 2)
            x,y = meshgrid(rang,rang)
            z = x ** 2 + y ** 2
            # Zero out everything outside the inscribed circle (shrunk by the
            # reconstruction offset):
            a = (z < (siz / 2 - int(round(abs(offset) / downsc_factor))) ** 2)
            im = im * a
    # Write down reconstructed preview file (file name modified with metadata:
    # width x height and min$max of the image):
    im = im.astype(float32)
    outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + '_' + str(amin(im)) + '$' + str(amax(im))
    im.tofile(outfile)
def main(argv):
    """Command-line entry point: parse the positional argument list, load the
    dataset and the flat/dark correction data, then call process() to
    reconstruct and write the requested slice.

    NOTE(review): argv carries 50 positional values whose layout must match
    the caller (the STP GUI); the indices are documented inline below.
    """
    # Index of the sinogram/slice to reconstruct:
    sino_idx = int(argv[0])
    # Get paths:
    infile = argv[1]
    outfile = argv[2]
    # Essential reconstruction parameters:
    angles = float(argv[3])
    offset = float(argv[4])
    recpar = argv[5]
    scale = int(float(argv[6]))
    overpad = True if argv[7] == "True" else False
    logtrsf = True if argv[8] == "True" else False
    circle = True if argv[9] == "True" else False
    # Parameters for on-the-fly pre-processing:
    preprocessing_required = True if argv[10] == "True" else False
    flat_end = True if argv[11] == "True" else False
    half_half = True if argv[12] == "True" else False
    half_half_line = int(argv[13])
    ext_fov = True if argv[14] == "True" else False
    norm_sx = int(argv[19])
    norm_dx = int(argv[20])
    ext_fov_rot_right = argv[15]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        # Extended FOV rotating right: left normalization margin is unused.
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        # Extended FOV rotating left: right normalization margin is unused.
        if (ext_fov):
            norm_dx = 0
    ext_fov_overlap = int(argv[16])
    ext_fov_normalize = True if argv[17] == "True" else False
    ext_fov_average = True if argv[18] == "True" else False
    skip_ringrem = True if argv[21] == "True" else False
    ringrem = argv[22]
    # Extra reconstruction parameters:
    zerone_mode = True if argv[23] == "True" else False
    corr_offset = float(argv[24])
    reconmethod = argv[25]
    # Force overpadding in case of GRIDREC for unknown reasons:
    if reconmethod == "GRIDREC":
        overpad = True
    decim_factor = int(argv[26])
    downsc_factor = int(argv[27])
    # Parameters for postprocessing:
    postprocess_required = True if argv[28] == "True" else False
    polarfilt_opt = argv[29]
    convert_opt = argv[30]
    crop_opt = argv[31]
    # Parameters for on-the-fly phase retrieval:
    phaseretrieval_required = True if argv[32] == "True" else False
    phrtmethod = int(argv[33])
    phrt_param1 = double(argv[34]) # param1 (e.g. regParam, or beta)
    phrt_param2 = double(argv[35]) # param2 (e.g. thresh or delta)
    energy = double(argv[36])
    distance = double(argv[37])
    pixsize = double(argv[38]) / 1000.0 # pixel size from micron to mm
    phrtpad = True if argv[39] == "True" else False
    approx_win = int(argv[40])
    angles_projfrom = int(argv[41])
    angles_projto = int(argv[42])
    rolling = True if argv[43] == "True" else False
    # Roll shift is expressed in (decimated) projections:
    roll_shift = int(int(argv[44]) / decim_factor)
    preprocessingplan_fromcache = True if argv[45] == "True" else False
    dynamic_ff = True if argv[46] == "True" else False
    nr_threads = int(argv[47])
    tmppath = argv[48]
    if not tmppath.endswith(sep): tmppath += sep
    logfilename = argv[49]
    # Open the HDF5 file (two possible internal layouts):
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
    else:
        dset = f_in['exchange/data']
        if "/provenance/detector_output" in f_in:
            prov_dset = f_in['provenance/detector_output']
    # [0,1] rescaling needs the dataset's global min/max attributes; disable
    # zerone_mode when either attribute is missing:
    dset_min = -1
    dset_max = -1
    if (zerone_mode):
        if ('min' in dset.attrs):
            dset_min = float(dset.attrs['min'])
        else:
            zerone_mode = False
        if ('max' in dset.attrs):
            dset_max = float(dset.attrs['max'])
        else:
            zerone_mode = False
    num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
    if (num_sinos == 0):
        exit()
    # Check extrema (clamp the requested slice into the downscaled range):
    if (sino_idx >= num_sinos / downsc_factor):
        sino_idx = num_sinos / downsc_factor - 1
    # Get correction plan and phase retrieval plan (if required):
    skipflat = False
    corrplan = 0
    im_dark = 0
    EFF = 0
    filtEFF = 0
    if (preprocessing_required):
        if not dynamic_ff:
            # Load flat fielding plan either from cache (if required) or from TDF file
            # and cache it for faster re-use:
            if (preprocessingplan_fromcache):
                try:
                    corrplan = cache2plan(infile, tmppath)
                except Exception as e:
                    # Cache miss/corruption: fall back to extracting the plan
                    # from the TDF file, then cache it.
                    corrplan = extract_flatdark(f_in, flat_end, logfilename)
                    if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after'])):
                        skipflat = True
                    else:
                        plan2cache(corrplan, infile, tmppath)
            else:
                corrplan = extract_flatdark(f_in, flat_end, logfilename)
                if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after'])):
                    skipflat = True
                else:
                    plan2cache(corrplan, infile, tmppath)
            # Downscale flat and dark images if necessary:
            if isinstance(corrplan['im_flat'], ndarray):
                corrplan['im_flat'] = corrplan['im_flat'][::downsc_factor,::downsc_factor]
            if isinstance(corrplan['im_dark'], ndarray):
                corrplan['im_dark'] = corrplan['im_dark'][::downsc_factor,::downsc_factor]
            if isinstance(corrplan['im_flat_after'], ndarray):
                corrplan['im_flat_after'] = corrplan['im_flat_after'][::downsc_factor,::downsc_factor]
            if isinstance(corrplan['im_dark_after'], ndarray):
                corrplan['im_dark_after'] = corrplan['im_dark_after'][::downsc_factor,::downsc_factor]
        else:
            # Dynamic flat fielding: locate flat/dark datasets in either layout:
            if "/tomo" in f_in:
                if "/flat" in f_in:
                    flat_dset = f_in['flat']
                    if "/dark" in f_in:
                        im_dark = _medianize(f_in['dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True # Nothing to do in this case
            else:
                if "/exchange/data_white" in f_in:
                    flat_dset = f_in['/exchange/data_white']
                    if "/exchange/data_dark" in f_in:
                        im_dark = _medianize(f_in['/exchange/data_dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True # Nothing to do in this case
            # Prepare plan for dynamic flat fielding with 16 repetitions:
            if not skipflat:
                EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)
                # Downscale images if necessary:
                im_dark = im_dark[::downsc_factor,::downsc_factor]
                EFF = EFF[::downsc_factor,::downsc_factor,:]
                filtEFF = filtEFF[::downsc_factor,::downsc_factor,:]
    f_in.close()
    # Run computation:
    process(sino_idx, num_sinos, infile, outfile, preprocessing_required, corrplan, skipflat, norm_sx,
            norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap,
            ext_fov_normalize, ext_fov_average, ringrem, phaseretrieval_required, phrtmethod, phrt_param1,
            phrt_param2, energy, distance, pixsize, phrtpad, approx_win, angles, angles_projfrom,
            angles_projto, offset, logtrsf, recpar, circle, scale, overpad, reconmethod, rolling,
            roll_shift, zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset,
            postprocess_required, polarfilt_opt, convert_opt, crop_opt, dynamic_ff, EFF, filtEFF,
            im_dark, nr_threads, logfilename, tmppath)
# Script entry point: argv[0] is the program name, so forward the rest.
if __name__ == "__main__":
    main(argv[1:])
|
ElettraSciComp/STP-Core
|
STP-Core/preview_reconstruct.py
|
Python
|
gpl-3.0
| 23,605
|
[
"Gaussian"
] |
ac90849f85af2125a58953a8ab6f0badacf27aa774fed7fa098f0761a9edd492
|
import json
import numpy as np
import os
from itertools import groupby
from operator import itemgetter
import copy
import sys
from finisherSCCoreLib import alignerRobot
from finisherSCCoreLib import graphLib
from finisherSCCoreLib import IORobot
from finisherSCCoreLib import houseKeeper
import associatedReadFinder
import readContigGraphFormer
import repeatFinder
import abunHouseKeeper
### Abundance split and its subroutines
def obtainNonEmpty(repeatPairs):
    # Keep only repeat pairs whose in-list AND out-list are both non-empty.
    return [pair for pair in repeatPairs
            if len(pair[0]) > 0 and len(pair[1]) > 0]
def convert4to1base(i):
    """Convert an index in the 4-per-contig space to the 1-per-contig space.

    Uses floor division explicitly: the original relied on Python 2 integer
    '/', which silently becomes float true division under Python 3.
    """
    return i // 4
def convert4to2base(i):
    """Convert an index in the 4-per-contig space to the 2-per-contig space.

    Uses floor division explicitly: the original relied on Python 2 integer
    '/', which silently becomes float true division under Python 3.
    """
    return i // 2
def getCt(inList, myCountDic):
    # For every index, pair its 2-based index with the abundance count of
    # the corresponding 1-based contig name ("SegkkN").
    return [[convert4to2base(idx),
             myCountDic["Segkk" + str(convert4to1base(idx))]]
            for idx in inList]
def satisfyMatch(initem, newOutList, sd):
    """Return the index of the out-contig whose abundance matches initem
    within 0.5*sd (excluding a self-match), or -1 when no unambiguous
    match exists (any other out-contig within 2.01*sd rejects the match)."""
    inIndex, inCt = initem[0], initem[1]
    # Pass 1: the last out-contig within the tight tolerance wins.
    targetItem = -1
    for outitem in newOutList:
        if abs(outitem[1] - inCt) < 0.5 * sd and inIndex != outitem[0]:
            targetItem = outitem[0]
    # Pass 2: reject when any OTHER out-contig is also close (ambiguity).
    rejection = any(
        outitem[0] != targetItem and abs(outitem[1] - inCt) <= 2.01 * sd
        for outitem in newOutList)
    return targetItem if (targetItem != -1 and not rejection) else -1
def determindMatch(inList, outList, myCountDic):
    """Pair in-contigs with out-contigs by abundance similarity.

    Returns a list of [inIndex, outIndex] pairs for every in-contig that
    has exactly one unambiguous abundance match among the out-contigs.
    """
    inWithCt = getCt(inList, myCountDic)
    outWithCt = getCt(outList, myCountDic)
    # The spread of all contig abundances sets the matching tolerance.
    sd = np.std([myCountDic[name] for name in myCountDic])
    resolvedList = []
    for initem in inWithCt:
        matched = satisfyMatch(initem, outWithCt, sd)
        if matched != -1:
            resolvedList.append([initem[0], matched])
    return resolvedList
def addEdges(G, resolvedList):
    # Record each resolved in/out pairing as a directed graph edge; 1997 is
    # the weight label the original code used for these edges.
    for src, dst in resolvedList:
        G.insertEdge(src, dst, 1997)
def abunSplit(folderName, mummerLink, myCountDic):
    '''
    Input : repeatSpecification.txt , myCountDic.json, improved3.fasta, raw_reads.fasta
    Output : abunsplit.fasta
    Algorithm :
        1. Load data from various sources [various json files]
        2. For each repeat interior:
            a) identify the abundances associated with in/out contigs
            b) perform a split and record the split
        3. Use split results to generate contigs [may already exist in newPhasing.py ]
            a) use a graph to capture the split results
            b) use reads to fill in any gaps
            c) read out the contigs
    '''
    # Fix: the JSON file handle was previously opened and never closed;
    # 'with' guarantees it is released.
    with open(folderName + "phaseRepeat.txt", 'r') as json_data:
        repeatPairs = json.load(json_data)
    repeatPairs = obtainNonEmpty(repeatPairs)
    # Two graph nodes per contig (forward/reverse orientation):
    N1 = len(myCountDic) * 2
    G = graphLib.seqGraph(N1)
    # Resolve each repeat by abundance matching and record the pairings:
    for eachitem in repeatPairs:
        inList, outList = eachitem[0], eachitem[1]
        resolvedList = determindMatch(inList, outList, myCountDic)
        addEdges(G, resolvedList)
    G.condense()
    IORobot.extractGraphToContigs(G, folderName, mummerLink, "abun.fasta", "improved3_Double.fasta")
def splitter(folderName, mummerLink):
    """Load the cached abundance dictionary (myCountDic.json) from folderName
    and run the abundance-based repeat split (abunSplit), which writes
    abun.fasta."""
    countDicPath = folderName + 'myCountDic.json'
    with open(countDicPath) as handle:
        abundanceByContig = json.load(handle)
    abunSplit(folderName, mummerLink, abundanceByContig)
'''
Input : string_graph_3, improved3.fasta, raw_reads.fasta
Output : string_graph_4 with weights [need a data structure to store the weight on node]
Algorithm :
1. Find your favorite mappers to map read back
a. MUMmer, Bowtie, bbmap, any that works V
b. And then write a short parser to parse the results V
2. Calculate count on the abundances
a. Aggregate by taking average [put weights on bin along contigs]
b. Inheritance and a subclass
3. Find your favorite graphical tool to display
a. Use a javascript library [halfviz should just work ! put weight on edge ]
'''
def evaluateCoverage(dataList, lenDic, readLenDic, folderName,mummerLink, continueFilter):
    """Estimate per-contig abundance from read-to-contig alignment records.

    dataList : MUMmer coordinate rows; the last field is the read name,
        field -2 the contig name, fields 4 and 6 the match length and the
        percent identity (inferred from usage below -- confirm format).
    lenDic / readLenDic : contig / read name -> length mappings.
    continueFilter : when True, re-align the still-unassigned reads with a
        refined MUMmer pass ("abunMissOut" output) for a later second call.
    Returns a dict: contig name -> coverage normalized by contig length.
    """
    # Start every contig at zero accumulated bases:
    myCountDic = {}
    for eachitem in lenDic:
        myCountDic[eachitem] = 0
    # Group alignment rows by read name; groupby requires the prior sort:
    dataList.sort(key = itemgetter(-1))
    ctkk, ctbase = 0, 0
    # Unassigned reads keep their length; assigned reads get marked -1:
    toAddBackDic = copy.deepcopy(readLenDic)
    for key, items in groupby(dataList, itemgetter(-1)):
        # Assign the read to the contig with the best identity-weighted match:
        maxMatch = -1
        bestname = ""
        for eachitem in items:
            ct = eachitem[6]/100.0 * eachitem[4]
            if ct > maxMatch:
                maxMatch = ct
                bestname = eachitem[-2]
        myCountDic[bestname] += readLenDic[key]
        ctkk = ctkk + 1
        ctbase = ctbase + readLenDic[key]
        toAddBackDic[key] = -1
    cttot = 0
    for eachitem in readLenDic:
        cttot = cttot + readLenDic[eachitem]
    # NOTE(review): 4.7e6 looks like a hard-coded genome length used only
    # for this progress report -- confirm against the target organism.
    print "Missed coverage ", (cttot - ctbase)/(4.7*pow(10, 6))
    print "percentage miss read", (len(readLenDic) - ctkk)/(1.0*len(readLenDic))
    # Collect reads that no alignment claimed:
    toAddReadList = []
    for eachitem in toAddBackDic:
        if toAddBackDic[eachitem] >= 0 :
            toAddReadList.append(eachitem)
    '''
    This part need the most parallelism because it is most intense with -l 10
    split V, workerList V , combine
    '''
    if continueFilter:
        # Re-align the missed reads: split them into 20 parts and run a
        # refined MUMmer batch, merging the results into "abunMissOut":
        numberOfFiles= 20
        IORobot.putListToFileO(folderName, "raw_reads.fasta" , "selected_raw", toAddReadList)
        bindir = os.path.abspath(os.path.dirname(sys.argv[0]))
        command = bindir + "/finisherSCCoreLib/fasta-splitter.pl --n-parts " + str(numberOfFiles) + " " + folderName + "selected_raw.fasta"
        os.system(command)
        workerList = []
        for dummyI in range(1, numberOfFiles + 1):
            # Two-digit, 1-based part suffix ("01" .. "20"):
            indexOfMum = ""
            if dummyI < 10:
                indexOfMum = "0" + str(dummyI)
            else:
                indexOfMum = str(dummyI)
            outputName, referenceName, queryName, specialName= "outAbunRefine"+indexOfMum, "improved3.fasta", "selected_raw.part-"+ indexOfMum + ".fasta", "abunMissOut" + indexOfMum
            workerList.append([outputName, referenceName, queryName, specialName])
        alignerRobot.useMummerAlignBatch(mummerLink, folderName, workerList, houseKeeper.globalParallel ,specialForRaw = True, refinedVersion = True)
        alignerRobot.combineMultipleCoorMum( True, mummerLink, folderName, "outAbunRefine", "abunMissOut", numberOfFiles)
    # Normalize accumulated base counts by contig length:
    for i in range(len(myCountDic)):
        eachitem = "Segkk"+str(i)
        print eachitem , myCountDic[eachitem]/(1.0*lenDic[eachitem])
        myCountDic[eachitem] = myCountDic[eachitem]/(1.0*lenDic[eachitem])
    return myCountDic
def generateAbundanceGraph(folderName, mummerLink):
    """Map all raw reads back to the improved contigs with MUMmer and derive
    a per-contig abundance dictionary, saved to myCountDic.json.

    The alignment work is split into 20 query parts processed in parallel;
    results are merged and passed to evaluateCoverage (optionally twice,
    with a refinement pass for reads missed by the first alignment).
    """
    print "generateAbundanceGraph"
    '''
    1. Find your favorite mappers to map read back
        a. MUMmer, Bowtie, bbmap, any that works V
        b. And then write a short parser to parse the results V
    '''
    # Build the batch of 20 MUMmer jobs (two-digit, 1-based part suffixes):
    numberOfFiles = 20
    workerList = []
    for dummyI in range(1, numberOfFiles + 1):
        indexOfMum = ""
        if dummyI < 10:
            indexOfMum = "0" + str(dummyI)
        else:
            indexOfMum = str(dummyI)
        '''
        "outGapFillRefine"+indexOfMum , "smaller_improvedContig.fasta", "relatedReads_Double.part-" + indexOfMum + ".fasta", "fromMumRefine" + indexOfMum
        '''
        outputName, referenceName, queryName, specialName= "outAbun"+indexOfMum, "improved3.fasta", "raw_reads.part-"+ indexOfMum + ".fasta", "outAbun" + indexOfMum
        workerList.append([outputName, referenceName, queryName, specialName])
    if True:
        alignerRobot.useMummerAlignBatch(mummerLink, folderName, workerList, houseKeeper.globalParallel ,False)
    '''
    command = mummerLink + "nucmer --maxmatch --nosimplify -p " + folderName + "out " + folderName + "improved3.fasta "+folderName+"raw_reads.part-" + indexOfMum + ".fasta"
    os.system(command)
    command = mummerLink + "show-coords -r " + folderName + "out.delta > " + folderName + "fromMumAbun" + indexOfMum
    os.system(command)
    '''
    # Merge the per-part alignment tables into one list:
    dataList = []
    for i in range(1, 1+numberOfFiles):
        if i < 10:
            indexOfMum = "0" + str(i)
        else:
            indexOfMum = str(i)
        dataList = dataList+ alignerRobot.extractMumData(folderName, "outAbun"+ str(indexOfMum)+"Out")
    '''
    2. Calculate count on the abundances
        a. Aggregate by taking average [put weights on bin along contigs]
        b. Inheritance and a subclass
    '''
    lenDic = IORobot.obtainLength(folderName, "improved3.fasta")
    readLenDic = IORobot.obtainLength(folderName , "raw_reads.fasta")
    myCountDic = {}
    for eachitem in lenDic:
        myCountDic[eachitem] = [0 for i in range(lenDic[eachitem])]
    thres = 30
    lenSum = 0
    extraDataList= []
    print "len(dataList)", len(dataList)
    # Optional refinement: realign missed reads (evaluateCoverage with
    # continueFilter=True produces "abunMissOut"), then redo the coverage
    # evaluation on the combined alignment list:
    if not abunHouseKeeper.abunGlobalAvoidrefine:
        myCountDic = evaluateCoverage(dataList, lenDic, readLenDic, folderName, mummerLink, True)
        extraDataList = alignerRobot.extractMumData(folderName, "abunMissOut" )
    else:
        extraDataList = []
    dataList = dataList + extraDataList
    myCountDic = evaluateCoverage(dataList, lenDic, readLenDic, folderName, mummerLink,False)
    # Persist the abundance dictionary for the splitting stage:
    with open(folderName + 'myCountDic.json', 'w') as f:
        json.dump(myCountDic, f)
    return myCountDic
def mainFlow(folderName, mummerLink):
    """Top-level driver for the abundance-based repeat splitting pipeline:
    repeat discovery, abundance estimation, then contig splitting.
    """
    print "Hello world"
    # File-name conventions shared with the other pipeline stages:
    contigFilename = "improved3"
    readsetFilename = "phasingSeedName"
    optTypeFileHeader = "phaseString"
    contigReadGraph = "phaseStringGraph1"
    repeatFilename = "phaseRepeat.txt"
    repeatSpec = "repeatSpecification.txt"
    optionToRun = "xphase"
    # Stage 1: locate repeats via the read-contig string graph:
    if True:
        associatedReadFinder.getAllAssociatedReads(folderName, mummerLink,readsetFilename)
        readContigGraphFormer.formReadContigStringGraph(folderName, mummerLink,contigFilename, readsetFilename, optTypeFileHeader , contigReadGraph )
        repeatFinder.identifyRepeat(folderName, mummerLink,contigFilename,contigReadGraph, repeatFilename, optionToRun )
    # Stage 2: estimate per-contig abundance:
    if True:
        myCountDic = generateAbundanceGraph(folderName, mummerLink)
    # Stage 3: split repeat contigs using the abundances; tidy up the
    # temporary split read files afterwards:
    if True :
        splitter(folderName, mummerLink)
        os.system("cp selected_raw.part-* "+ folderName )
        os.system("rm selected_raw.part-*")
|
kakitone/finishingTool
|
experimental/abunSplitter.py
|
Python
|
mit
| 11,578
|
[
"Bowtie"
] |
18e3c3fa04b8d0bf94847899765215d3d7b2c2304b81301708b36de7846d4f2a
|
# Copyright 2006-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
#Nice link:
# http://www.ebi.ac.uk/help/formats_frame.html
"""Sequence input/output as SeqRecord objects.
Bio.SeqIO is also documented at U{http://biopython.org/wiki/SeqIO} and by
a whole chapter in our tutorial:
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.html}
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.pdf}
Input
=====
The main function is Bio.SeqIO.parse(...) which takes an input file handle
(or in recent versions of Biopython alternatively a filename as a string),
and format string. This returns an iterator giving SeqRecord objects:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Fasta/f002", "fasta"):
... print record.id, len(record)
gi|1348912|gb|G26680|G26680 633
gi|1348917|gb|G26685|G26685 413
gi|1592936|gb|G29385|G29385 471
Note that the parse() function will invoke the relevant parser for the
format with its default settings. You may want more control, in which case
you need to create a format specific sequence iterator directly.
Input - Single Records
======================
If you expect your file to contain one-and-only-one record, then we provide
the following 'helper' function which will return a single SeqRecord, or
raise an exception if there are no records or more than one record:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Fasta/f001", "fasta")
>>> print record.id, len(record)
gi|3318709|pdb|1A91| 79
This style is useful when you expect a single record only (and would
consider multiple records an error). For example, when dealing with GenBank
files for bacterial genomes or chromosomes, there is normally only a single
record. Alternatively, use this with a handle when downloading a single
record from the internet.
However, if you just want the first record from a file containing multiple
record, use the iterator's next() method:
>>> from Bio import SeqIO
>>> record = SeqIO.parse("Fasta/f002", "fasta").next()
>>> print record.id, len(record)
gi|1348912|gb|G26680|G26680 633
The above code will work as long as the file contains at least one record.
Note that if there is more than one record, the remaining records will be
silently ignored.
Input - Multiple Records
========================
For non-interlaced files (e.g. Fasta, GenBank, EMBL) with multiple records
using a sequence iterator can save you a lot of memory (RAM). There is
less benefit for interlaced file formats (e.g. most multiple alignment file
formats). However, an iterator only lets you access the records one by one.
If you want random access to the records by number, turn this into a list:
>>> from Bio import SeqIO
>>> records = list(SeqIO.parse("Fasta/f002", "fasta"))
>>> len(records)
3
>>> print records[1].id
gi|1348917|gb|G26685|G26685
If you want random access to the records by a key such as the record id,
turn the iterator into a dictionary:
>>> from Bio import SeqIO
>>> record_dict = SeqIO.to_dict(SeqIO.parse("Fasta/f002", "fasta"))
>>> len(record_dict)
3
>>> print len(record_dict["gi|1348917|gb|G26685|G26685"])
413
However, using list() or the to_dict() function will load all the records
into memory at once, and therefore is not possible on very large files.
Instead, for *some* file formats Bio.SeqIO provides an indexing approach
providing dictionary like access to any record. For example,
>>> from Bio import SeqIO
>>> record_dict = SeqIO.index("Fasta/f002", "fasta")
>>> len(record_dict)
3
>>> print len(record_dict["gi|1348917|gb|G26685|G26685"])
413
Many but not all of the supported input file formats can be indexed like
this. For example "fasta", "fastq", "qual" and even the binary format "sff"
work, but alignment formats like "phylip", "clustalw" and "nexus" will not.
In most cases you can also use SeqIO.index to get the record from the file
as a raw string (not a SeqRecord). This can be useful for example to extract
a sub-set of records from a file where SeqIO cannot output the file format
(e.g. the plain text SwissProt format, "swiss") or where it is important to
keep the output 100% identical to the input). For example,
>>> from Bio import SeqIO
>>> record_dict = SeqIO.index("Fasta/f002", "fasta")
>>> len(record_dict)
3
>>> print record_dict.get_raw("gi|1348917|gb|G26685|G26685")
>gi|1348917|gb|G26685|G26685 human STS STS_D11734.
CGGAGCCAGCGAGCATATGCTGCATGAGGACCTTTCTATCTTACATTATGGCTGGGAATCTTACTCTTTC
ATCTGATACCTTGTTCAGATTTCAAAATAGTTGTAGCCTTATCCTGGTTTTACAGATGTGAAACTTTCAA
GAGATTTACTGACTTTCCTAGAATAGTTTCTCTACTGGAAACCTGATGCTTTTATAAGCCATTGTGATTA
GGATGACTGTTACAGGCTTAGCTTTGTGTGAAANCCAGTCACCTTTCTCCTAGGTAATGAGTAGTGCTGT
TCATATTACTNTAAGTTCTATAGCATACTTGCNATCCTTTANCCATGCTTATCATANGTACCATTTGAGG
AATTGNTTTGCCCTTTTGGGTTTNTTNTTGGTAAANNNTTCCCGGGTGGGGGNGGTNNNGAAA
<BLANKLINE>
>>> print record_dict["gi|1348917|gb|G26685|G26685"].format("fasta")
>gi|1348917|gb|G26685|G26685 human STS STS_D11734.
CGGAGCCAGCGAGCATATGCTGCATGAGGACCTTTCTATCTTACATTATGGCTGGGAATC
TTACTCTTTCATCTGATACCTTGTTCAGATTTCAAAATAGTTGTAGCCTTATCCTGGTTT
TACAGATGTGAAACTTTCAAGAGATTTACTGACTTTCCTAGAATAGTTTCTCTACTGGAA
ACCTGATGCTTTTATAAGCCATTGTGATTAGGATGACTGTTACAGGCTTAGCTTTGTGTG
AAANCCAGTCACCTTTCTCCTAGGTAATGAGTAGTGCTGTTCATATTACTNTAAGTTCTA
TAGCATACTTGCNATCCTTTANCCATGCTTATCATANGTACCATTTGAGGAATTGNTTTG
CCCTTTTGGGTTTNTTNTTGGTAAANNNTTCCCGGGTGGGGGNGGTNNNGAAA
<BLANKLINE>
Here the original file and what Biopython would output differ in the line
wrapping.
Input - Alignments
==================
You can read in alignment files as alignment objects using Bio.AlignIO.
Alternatively, reading in an alignment file format via Bio.SeqIO will give
you a SeqRecord for each row of each alignment:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Clustalw/hedgehog.aln", "clustal"):
... print record.id, len(record)
gi|167877390|gb|EDS40773.1| 447
gi|167234445|ref|NP_001107837. 447
gi|74100009|gb|AAZ99217.1| 447
gi|13990994|dbj|BAA33523.2| 447
gi|56122354|gb|AAV74328.1| 447
Output
======
Use the function Bio.SeqIO.write(...), which takes a complete set of
SeqRecord objects (either as a list, or an iterator), an output file handle
(or in recent versions of Biopython an output filename as a string) and of
course the file format::
from Bio import SeqIO
records = ...
SeqIO.write(records, "example.faa", "fasta")
Or, using a handle::
from Bio import SeqIO
records = ...
handle = open("example.faa", "w")
SeqIO.write(records, handle, "fasta")
handle.close()
You are expected to call this function once (with all your records) and if
using a handle, make sure you close it to flush the data to the hard disk.
Output - Advanced
=================
The effect of calling write() multiple times on a single file will vary
depending on the file format, and is best avoided unless you have a strong
reason to do so.
If you give a filename, then each time you call write() the existing file
will be overwritten. For sequential file formats (e.g. fasta, genbank) each
"record block" holds a single sequence. For these files it would probably
be safe to call write() multiple times by re-using the same handle.
However, trying this for certain alignment formats (e.g. phylip, clustal,
stockholm) would have the effect of concatenating several multiple sequence
alignments together. Such files are created by the PHYLIP suite of programs
for bootstrap analysis, but it is clearer to do this via Bio.AlignIO instead.
Conversion
==========
The Bio.SeqIO.convert(...) function allows an easy interface for simple
file format conversions. Additionally, it may use file format specific
optimisations so this should be the fastest way too.
In general however, you can combine the Bio.SeqIO.parse(...) function with
the Bio.SeqIO.write(...) function for sequence file conversion. Using
generator expressions or generator functions provides a memory efficient way
to perform filtering or other extra operations as part of the process.
File Formats
============
When specifying the file format, use lowercase strings. The same format
names are also used in Bio.AlignIO and include the following:
- ace - Reads the contig sequences from an ACE assembly file.
- embl - The EMBL flat file format. Uses Bio.GenBank internally.
- fasta - The generic sequence file format where each record starts with
an identifier line starting with a ">" character, followed by
lines of sequence.
- fastq - A "FASTA like" format used by Sanger which also stores PHRED
sequence quality values (with an ASCII offset of 33).
- fastq-sanger - An alias for "fastq" for consistency with BioPerl and EMBOSS
- fastq-solexa - Original Solexa/Illumina variant of the FASTQ format which
encodes Solexa quality scores (not PHRED quality scores) with an
ASCII offset of 64.
- fastq-illumina - Solexa/Illumina 1.3+ variant of the FASTQ format which
encodes PHRED quality scores with an ASCII offset of 64 (not 33).
- genbank - The GenBank or GenPept flat file format.
- gb - An alias for "genbank", for consistency with NCBI Entrez Utilities
- ig - The IntelliGenetics file format, apparently the same as the
MASE alignment format.
- imgt - An EMBL like format from IMGT where the feature tables are more
indented to allow for longer feature types.
- phd - Output from PHRED, used by PHRAP and CONSED for input.
- pir - A "FASTA like" format introduced by the National Biomedical
Research Foundation (NBRF) for the Protein Information Resource
(PIR) database, now part of UniProt.
- sff - Standard Flowgram Format (SFF), typical output from Roche 454.
- sff-trim - Standard Flowgram Format (SFF) with given trimming applied.
- swiss - Plain text Swiss-Prot aka UniProt format.
- tab - Simple two column tab separated sequence files, where each
line holds a record's identifier and sequence. For example,
this is used by Agilent's eArray software when saving
microarray probes in a minimal tab delimited text file.
- qual - A "FASTA like" format holding PHRED quality values from
sequencing DNA, but no actual sequences (usually provided
in separate FASTA files).
- uniprot-xml - The UniProt XML format (replacement for the SwissProt plain
text format which we call "swiss")
Note that while Bio.SeqIO can read all the above file formats, it cannot
write to all of them.
You can also use any file format supported by Bio.AlignIO, such as "nexus",
"phlip" and "stockholm", which gives you access to the individual sequences
making up each alignment as SeqRecords.
"""
# Docstrings in this module use epydoc's epytext markup, not plain text.
__docformat__ = "epytext en"  # not just plaintext
#TODO
# - define policy on reading aligned sequences with gaps in
# (e.g. - and . characters) including how the alphabet interacts
#
# - How best to handle unique/non unique record.id when writing.
# For most file formats reading such files is fine; The stockholm
# parser would fail.
#
# - MSF multiple alignment format, aka GCG, aka PileUp format (*.msf)
# http://www.bioperl.org/wiki/MSF_multiple_alignment_format
"""
FAO BioPython Developers
========================
The way I envision this SeqIO system working is that for any sequence file
format we have an iterator that returns SeqRecord objects.
This also applies to interlaced fileformats (like clustal - although that
is now handled via Bio.AlignIO instead) where the file cannot be read record
by record. You should still return an iterator, even if the implementation
could just as easily return a list.
These file format specific sequence iterators may be implemented as:
* Classes which take a handle for __init__ and provide the __iter__ method
* Functions that take a handle, and return an iterator object
* Generator functions that take a handle, and yield SeqRecord objects
It is then trivial to turn this iterator into a list of SeqRecord objects,
an in memory dictionary, or a multiple sequence alignment object.
For building the dictionary by default the id property of each SeqRecord is
used as the key. You should always populate the id property, and it should
be unique in most cases. For some file formats the accession number is a good
choice. If the file itself contains ambiguous identifiers, don't try and
dis-ambiguate them - return them as is.
When adding a new file format, please use the same lower case format name
as BioPerl, or if they have not defined one, try the names used by EMBOSS.
See also http://biopython.org/wiki/SeqIO_dev
--Peter
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Align.Generic import Alignment
from Bio.Alphabet import Alphabet, AlphabetEncoder, _get_base_alphabet
import FastaIO
# Convention for format names is "mainname-subtype" in lower case.
# Please use the same names as BioPerl or EMBOSS where possible.
#
# Note that this simple system copes with defining
# multiple possible iterators for a given format/extension
# with the -subtype suffix
#
# Most alignment file formats will be handled via Bio.AlignIO

# Maps a format name to a callable returning a SeqRecord iterator for a handle.
_FormatToIterator = {"fasta" : FastaIO.FastaIterator,
                     }

# Maps a format name to a writer class (constructed with an output handle).
_FormatToWriter = {"fasta" : FastaIO.FastaWriter,
                   }

# Formats which must be opened in binary mode ("rb"/"wb"), not text mode.
_BinaryFormats = ["sff", "sff-trim"]
def write(sequences, handle, format):
    """Write complete set of sequences to a file.

    - sequences - A list (or iterator) of SeqRecord objects, or (if using
      Biopython 1.54 or later) a single SeqRecord.
    - handle    - File handle object to write to, or filename as string
      (note older versions of Biopython only took a handle).
    - format    - lower case string describing the file format to write.

    You should close the handle after calling this function.

    Returns the number of records written (as an integer).
    """
    from Bio import AlignIO

    # Try and give helpful error messages:
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if isinstance(sequences, SeqRecord):
        # This raised an exception in older versions of Biopython
        sequences = [sequences]

    if isinstance(handle, basestring):
        # Binary formats (e.g. SFF) must not be opened in text mode.
        if format in _BinaryFormats :
            handle = open(handle, "wb")
        else :
            handle = open(handle, "w")
        # Remember we opened this handle ourselves, so we must close it.
        handle_close = True
    else:
        handle_close = False

    # Map the file format to a writer class
    if format in _FormatToWriter:
        writer_class = _FormatToWriter[format]
        count = writer_class(handle).write_file(sequences)
    elif format in AlignIO._FormatToWriter:
        # Try and turn all the records into a single alignment,
        # and write that using Bio.AlignIO
        alignment = MultipleSeqAlignment(sequences)
        alignment_count = AlignIO.write([alignment], handle, format)
        assert alignment_count == 1, "Internal error - the underlying writer " \
            + " should have returned 1, not %s" % repr(alignment_count)
        count = len(alignment)
        del alignment_count, alignment
    elif format in _FormatToIterator or format in AlignIO._FormatToIterator:
        raise ValueError("Reading format '%s' is supported, but not writing" \
                         % format)
    else:
        raise ValueError("Unknown format '%s'" % format)

    assert isinstance(count, int), "Internal error - the underlying %s " \
           "writer should have returned the record count, not %s" \
           % (format, repr(count))

    if handle_close:
        handle.close()

    return count
def parse(handle, format, alphabet=None):
    r"""Turns a sequence file into an iterator returning SeqRecords.

    - handle   - handle to the file, or the filename as a string
      (note older versions of Biopython only took a handle).
    - format   - lower case string describing the file format.
    - alphabet - optional Alphabet object, useful when the sequence type
      cannot be automatically inferred from the file itself
      (e.g. format="fasta" or "tab")

    Typical usage, opening a file to read in, and looping over the record(s):

    >>> from Bio import SeqIO
    >>> filename = "Fasta/sweetpea.nu"
    >>> for record in SeqIO.parse(filename, "fasta"):
    ...    print "ID", record.id
    ...    print "Sequence length", len(record)
    ...    print "Sequence alphabet", record.seq.alphabet
    ID gi|3176602|gb|U78617.1|LOU78617
    Sequence length 309
    Sequence alphabet SingleLetterAlphabet()

    For file formats like FASTA where the alphabet cannot be determined, it
    may be useful to specify the alphabet explicitly:

    >>> from Bio import SeqIO
    >>> from Bio.Alphabet import generic_dna
    >>> filename = "Fasta/sweetpea.nu"
    >>> for record in SeqIO.parse(filename, "fasta", generic_dna):
    ...    print "ID", record.id
    ...    print "Sequence length", len(record)
    ...    print "Sequence alphabet", record.seq.alphabet
    ID gi|3176602|gb|U78617.1|LOU78617
    Sequence length 309
    Sequence alphabet DNAAlphabet()

    If you have a string 'data' containing the file contents, you must
    first turn this into a handle in order to parse it:

    >>> data = ">Alpha\nACCGGATGTA\n>Beta\nAGGCTCGGTTA\n"
    >>> from Bio import SeqIO
    >>> from StringIO import StringIO
    >>> for record in SeqIO.parse(StringIO(data), "fasta"):
    ...     print record.id, record.seq
    Alpha ACCGGATGTA
    Beta AGGCTCGGTTA

    Use the Bio.SeqIO.read(...) function when you expect a single record
    only.
    """
    # NOTE - The above docstring has some raw \n characters needed
    # for the StringIO example, hence the whole docstring is in raw
    # string mode (see the leading r before the opening quote).
    from Bio import AlignIO

    handle_close = False
    if isinstance(handle, basestring):
        # Hack for SFF, will need to make this more general in future
        if format in _BinaryFormats :
            handle = open(handle, "rb")
        else :
            handle = open(handle, "rU")
        # TODO - On Python 2.5+ use with statement to close handle
        handle_close = True

    # Try and give helpful error messages:
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if alphabet is not None and not (isinstance(alphabet, Alphabet) or \
                                     isinstance(alphabet, AlphabetEncoder)):
        raise ValueError("Invalid alphabet, %s" % repr(alphabet))

    # Map the file format to a sequence iterator:
    if format in _FormatToIterator:
        iterator_generator = _FormatToIterator[format]
        if alphabet is None:
            i = iterator_generator(handle)
        else:
            try:
                i = iterator_generator(handle, alphabet=alphabet)
            except TypeError:
                # Not every iterator accepts an alphabet argument; instead
                # wrap it and override the alphabet on each record.
                i = _force_alphabet(iterator_generator(handle), alphabet)
    elif format in AlignIO._FormatToIterator:
        # Use Bio.AlignIO to read in the alignments
        # TODO - Can this helper function can be replaced with a generator
        # expression, or something from itertools?
        i = _iterate_via_AlignIO(handle, format, alphabet)
    else:
        raise ValueError("Unknown format '%s'" % format)

    # This imposes some overhead... wait until we drop Python 2.4 to fix it
    # NOTE(review): if iteration is abandoned early or raises, the handle we
    # opened is never closed (no try/finally allowed in 2.4 generators).
    for r in i:
        yield r
    if handle_close:
        handle.close()
#This is a generator function
def _iterate_via_AlignIO(handle, format, alphabet):
    """Yield every row of every alignment in the file as a SeqRecord (PRIVATE)."""
    from Bio import AlignIO
    for alignment in AlignIO.parse(handle, format, alphabet=alphabet):
        for row in alignment:
            yield row
def _force_alphabet(record_iterator, alphabet):
    """Iterate over records, over-riding the alphabet (PRIVATE)."""
    # The alphabet argument is assumed to have been validated by the caller.
    expected_base = _get_base_alphabet(alphabet).__class__
    for rec in record_iterator:
        if not isinstance(_get_base_alphabet(rec.seq.alphabet),
                          expected_base):
            # The requested alphabet is incompatible with what the parser saw.
            raise ValueError("Specified alphabet %s clashes with "\
                             "that determined from the file, %s" \
                             % (repr(alphabet), repr(rec.seq.alphabet)))
        rec.seq.alphabet = alphabet
        yield rec
def read(handle, format, alphabet=None):
    """Turns a sequence file into a single SeqRecord.

    - handle   - handle to the file, or the filename as a string
      (note older versions of Biopython only took a handle).
    - format   - string describing the file format.
    - alphabet - optional Alphabet object, useful when the sequence type
      cannot be automatically inferred from the file itself
      (e.g. format="fasta" or "tab")

    This function is for use parsing sequence files containing
    exactly one record. For example, reading a GenBank file:

    >>> from Bio import SeqIO
    >>> record = SeqIO.read("GenBank/arab1.gb", "genbank")
    >>> print "ID", record.id
    ID AC007323.5
    >>> print "Sequence length", len(record)
    Sequence length 86436
    >>> print "Sequence alphabet", record.seq.alphabet
    Sequence alphabet IUPACAmbiguousDNA()

    If the handle contains no records, or more than one record,
    an exception is raised. For example:

    >>> from Bio import SeqIO
    >>> record = SeqIO.read("GenBank/cor6_6.gb", "genbank")
    Traceback (most recent call last):
        ...
    ValueError: More than one record found in handle

    If however you want the first record from a file containing
    multiple records this function would raise an exception (as
    shown in the example above). Instead use:

    >>> from Bio import SeqIO
    >>> record = SeqIO.parse("GenBank/cor6_6.gb", "genbank").next()
    >>> print "First record's ID", record.id
    First record's ID X55053.1

    Use the Bio.SeqIO.parse(handle, format) function if you want
    to read multiple records from the handle.
    """
    iterator = parse(handle, format, alphabet)
    try:
        # Python 2 iterator protocol: .next() rather than next().
        first = iterator.next()
    except StopIteration:
        first = None
    if first is None:
        raise ValueError("No records found in handle")
    try:
        # Pull one more record to confirm the file held exactly one.
        second = iterator.next()
    except StopIteration:
        second = None
    if second is not None:
        raise ValueError("More than one record found in handle")
    return first
def to_dict(sequences, key_function=None):
    """Build a dictionary mapping keys to SeqRecord objects.

    - sequences    - An iterator that returns SeqRecord objects,
      or simply a list of SeqRecord objects.
    - key_function - Optional callback function which when given a
      SeqRecord should return a unique key for the dictionary,
      e.g. key_function = lambda rec : rec.name
      or,  key_function = lambda rec : rec.description.split()[0]

    When key_function is omitted, record.id is used as the key, on the
    assumption that the records have unique id strings. A ValueError is
    raised if two records produce the same key.

    Note that every record is held in memory at once, so this approach is
    not suitable for very large sets of sequences. Instead, consider using
    the Bio.SeqIO.index() function (if it supports your file format).
    """
    if key_function is None:
        key_function = lambda rec : rec.id

    answer = dict()
    for record in sequences:
        key = key_function(record)
        if key in answer:
            raise ValueError("Duplicate key '%s'" % key)
        answer[key] = record
    return answer
def index(filename, format, alphabet=None, key_function=None):
    """Indexes a sequence file and returns a dictionary like object.

    - filename     - string giving name of file to be indexed
    - format       - lower case string describing the file format
    - alphabet     - optional Alphabet object, useful when the sequence type
      cannot be automatically inferred from the file itself
      (e.g. format="fasta" or "tab")
    - key_function - Optional callback function which when given a
      SeqRecord identifier string should return a unique
      key for the dictionary.

    This indexing function will return a dictionary like object, giving the
    SeqRecord objects as values:

    >>> from Bio import SeqIO
    >>> records = SeqIO.index("Quality/example.fastq", "fastq")
    >>> len(records)
    3
    >>> sorted(records)
    ['EAS54_6_R1_2_1_413_324', 'EAS54_6_R1_2_1_443_348', 'EAS54_6_R1_2_1_540_792']
    >>> print records["EAS54_6_R1_2_1_540_792"].format("fasta")
    >EAS54_6_R1_2_1_540_792
    TTGGCAGGCCAAGGCCGATGGATCA
    <BLANKLINE>
    >>> "EAS54_6_R1_2_1_540_792" in records
    True
    >>> print records.get("Missing", None)
    None

    Note that this pseudo dictionary will not support all the methods of a
    true Python dictionary, for example values() is not defined since this
    would require loading all of the records into memory at once.

    When you call the index function, it will scan through the file, noting
    the location of each record. When you access a particular record via the
    dictionary methods, the code will jump to the appropriate part of the
    file and then parse that section into a SeqRecord.

    Note that not all the input formats supported by Bio.SeqIO can be used
    with this index function. It is designed to work only with sequential
    file formats (e.g. "fasta", "gb", "fastq") and is not suitable for any
    interlaced file format (e.g. alignment formats such as "clustal").

    For small files, it may be more efficient to use an in memory Python
    dictionary, e.g.

    >>> from Bio import SeqIO
    >>> records = SeqIO.to_dict(SeqIO.parse(open("Quality/example.fastq"), "fastq"))
    >>> len(records)
    3
    >>> sorted(records)
    ['EAS54_6_R1_2_1_413_324', 'EAS54_6_R1_2_1_443_348', 'EAS54_6_R1_2_1_540_792']
    >>> print records["EAS54_6_R1_2_1_540_792"].format("fasta")
    >EAS54_6_R1_2_1_540_792
    TTGGCAGGCCAAGGCCGATGGATCA
    <BLANKLINE>

    As with the to_dict() function, by default the id string of each record
    is used as the key. You can specify a callback function to transform
    this (the record identifier string) into your preferred key. For example:

    >>> from Bio import SeqIO
    >>> def make_tuple(identifier):
    ...     parts = identifier.split("_")
    ...     return int(parts[-2]), int(parts[-1])
    >>> records = SeqIO.index("Quality/example.fastq", "fastq",
    ...                       key_function=make_tuple)
    >>> len(records)
    3
    >>> sorted(records)
    [(413, 324), (443, 348), (540, 792)]
    >>> print records[(540, 792)].format("fasta")
    >EAS54_6_R1_2_1_540_792
    TTGGCAGGCCAAGGCCGATGGATCA
    <BLANKLINE>
    >>> (540, 792) in records
    True
    >>> "EAS54_6_R1_2_1_540_792" in records
    False
    >>> print records.get("Missing", None)
    None

    Another common use case would be indexing an NCBI style FASTA file,
    where you might want to extract the GI number from the FASTA identifier
    to use as the dictionary key.

    Notice that unlike the to_dict() function, here the key_function does
    not get given the full SeqRecord to use to generate the key. Doing so
    would impose a severe performance penalty as it would require the file
    to be completely parsed while building the index. Right now this is
    usually avoided.
    """
    # Try and give helpful error messages:
    if not isinstance(filename, basestring):
        raise TypeError("Need a filename (not a handle)")
    if not isinstance(format, basestring):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if alphabet is not None and not (isinstance(alphabet, Alphabet) or \
                                     isinstance(alphabet, AlphabetEncoder)):
        raise ValueError("Invalid alphabet, %s" % repr(alphabet))

    # Map the file format to a sequence iterator:
    import _index #Lazy import
    try:
        indexer = _index._FormatToIndexedDict[format]
    except KeyError:
        raise ValueError("Unsupported format '%s'" % format)
    return indexer(filename, format, alphabet, key_function)
def to_alignment(sequences, alphabet=None, strict=True):
    """Returns a multiple sequence alignment (DEPRECATED).

    - sequences - An iterator that returns SeqRecord objects,
      or simply a list of SeqRecord objects. All
      the record sequences must be the same length.
    - alphabet  - Optional alphabet. Strongly recommended.
    - strict    - Dummy argument, used to enable strict error
      checking of sequence lengths and alphabets.
      This is now always done.

    Using this function is now discouraged. You are now encouraged to use
    Bio.AlignIO instead, e.g.

    >>> from Bio import AlignIO
    >>> filename = "Clustalw/protein.aln"
    >>> alignment = AlignIO.read(filename, "clustal")
    """
    # Local imports keep the deprecation machinery off the fast path.
    import warnings
    import Bio
    warnings.warn("The Bio.SeqIO.to_alignment(...) function is deprecated. "
                  "Please use the Bio.Align.MultipleSeqAlignment(...) object "
                  "directly instead.", Bio.BiopythonDeprecationWarning)
    return MultipleSeqAlignment(sequences, alphabet)
def convert(in_file, in_format, out_file, out_format, alphabet=None):
    """Convert between two sequence file formats, return number of records.

    - in_file    - an input handle or filename
    - in_format  - input file format, lower case string
    - out_file   - an output handle or filename
    - out_format - output file format, lower case string
    - alphabet   - optional alphabet to assume

    NOTE - If you provide an output filename, it will be opened which will
    overwrite any existing file without warning. This may happen if even
    the conversion is aborted (e.g. an invalid out_format name is given).

    For example, going from a filename to a handle:

    >>> from Bio import SeqIO
    >>> from StringIO import StringIO
    >>> handle = StringIO("")
    >>> SeqIO.convert("Quality/example.fastq", "fastq", handle, "fasta")
    3
    >>> print handle.getvalue()
    >EAS54_6_R1_2_1_413_324
    CCCTTCTTGTCTTCAGCGTTTCTCC
    >EAS54_6_R1_2_1_540_792
    TTGGCAGGCCAAGGCCGATGGATCA
    >EAS54_6_R1_2_1_443_348
    GTTGCTTCTGGCGTGGGTGGGGGGG
    <BLANKLINE>
    """
    if isinstance(in_file, basestring):
        # Hack for SFF, will need to make this more general in future
        if in_format in _BinaryFormats :
            in_handle = open(in_file, "rb")
        else :
            in_handle = open(in_file, "rU")
        in_close = True
    else:
        in_handle = in_file
        in_close = False
    # Don't open the output file until we've checked the input is OK?
    if isinstance(out_file, basestring):
        # BUG FIX: this previously tested for "sff_trim" (underscore) which
        # never matches the real format name "sff-trim", so sff-trim output
        # files were opened in text mode. Use the shared _BinaryFormats list
        # so the binary format names stay consistent module-wide.
        if out_format in _BinaryFormats :
            out_handle = open(out_file, "wb")
        else :
            out_handle = open(out_file, "w")
        out_close = True
    else:
        out_handle = out_file
        out_close = False
    # This will check the arguments and issue error messages,
    # after we have opened the file which is a shame.
    from _convert import _handle_convert #Lazy import
    count = _handle_convert(in_handle, in_format,
                            out_handle, out_format,
                            alphabet)
    # Must now close any handles we opened
    if in_close:
        in_handle.close()
    if out_close:
        out_handle.close()
    return count
def _test():
"""Run the Bio.SeqIO module's doctests.
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..", "..", "Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..", "..", "Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
elif os.path.isdir(os.path.join("Tests", "Fasta")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
#Run the doctests
_test()
|
ktokolwiek/Genetic-Stability-Tool
|
Bio/SeqIO/__init__.py
|
Python
|
gpl-3.0
| 34,823
|
[
"BioPerl",
"Biopython"
] |
2e611a9292b5e58e06c9b6be556bebd1ddea4b3cc60d9d3692ec9edf5763655b
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Raw string fixes the invalid "\." escape sequence in a normal string
# literal (DeprecationWarning on Python 3.6+); the value is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'

# Paths for the kubeconfig files used by kubelet, kube-proxy and kubectl.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'

# Make the snap-installed binaries (kubelet, kubectl, ...) resolvable.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Unit-local key/value store provided by charmhelpers.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Reset worker state on charm upgrade so services are reconfigured."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Clear the installed/configured flags so the reactive handlers rebuild
    # config and restart the daemons with the new charm code.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag a snap upgrade if any of the attached resources changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    resource_paths = [hookenv.resource_get(name)
                      for name in ('kubectl', 'kubelet', 'kube-proxy')]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing an upgrade, auto-approving when allowed."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    manual = config.get('require-manual-upgrade')
    # On first install (no previous channel) or when manual approval is not
    # required, go ahead and approve the upgrade immediately.
    if config.previous('channel') is None or not manual:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove states, services and files left over from a pre-snap install."""
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    for svc in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(svc))
        service_stop(svc)

    # cleanup old files
    old_paths = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for path in old_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when('config.changed.channel')
def channel_changed():
    """A new snap channel was configured; flag the snaps for upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block the unit until the operator approves the upgrade via the action."""
    hookenv.status_set('blocked',
                       'Needs manual upgrade, run the upgrade action')
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the worker services
    '''
    try:
        if os.path.isfile(kubeconfig_path):
            # Only attempt to unregister if kubelet ever had a kubeconfig;
            # node names are registered as the lowercased hostname.
            kubectl('delete', 'node', gethostname().lower())
    except CalledProcessError:
        # Best effort: the master may already be gone at destroy time.
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        # Resource name depends on CPU architecture, e.g. "cni-amd64".
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')

    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]

    # Copy each unpacked plugin binary into its install location.
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)

    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    # check_output returns bytes (e.g. b"Kubernetes v1.8.0\n"); keep only the
    # part after " v" and strip trailing whitespace.
    # NOTE(review): the resulting value is bytes, not str — presumably
    # accepted by hookenv.application_version_set; confirm.
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify to the user we are in a transient state and the application
    is still converging. Potentially remotely, or we may be in a detached loop
    wait state '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Refresh the unit status from the current kubelet/kube-proxy state.'''
    update_kubelet_status()
def update_kubelet_status():
    '''Set the unit status from the state of the worker daemons.

    Reports active when both kubelet and kube-proxy systemd units are up;
    otherwise reports waiting, naming the services that are still down.
    '''
    down = [svc for svc in ('kubelet', 'kube-proxy')
            if not _systemctl_is_active('snap.{}.daemon'.format(svc))]
    if down:
        msg = 'Waiting for {} to start.'.format(','.join(down))
        hookenv.status_set('waiting', msg)
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
def get_ingress_address(relation):
    '''Return the address peers on this relation should use to reach us.

    Prefers the first entry of juju's network-get ingress-addresses; on
    older controllers that do not implement network spaces, falls back to
    the unit's private address.
    '''
    try:
        info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        info = []
    if not info or 'ingress-addresses' not in info:
        # No spaces support on this juju; use the private address instead.
        return hookenv.unit_get('private-address')
    # Just grab the first one for now; maybe be more robust here?
    return info['ingress-addresses'][0]
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    '''Request a server certificate for this unit from the tls layer.'''
    # The unit's public IP doubles as the certificate Common Name.
    common_name = hookenv.unit_public_ip()
    # SANs cover every name/address the kubelet may be reached by.
    sans = [
        common_name,
        get_ingress_address(kube_control),
        gethostname(),
    ]
    # Unit names contain '/'; replace it so the cert name is path safe.
    certificate_name = hookenv.local_unit().replace('/', '_')
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    '''Flag a restart when api servers, dns config, or cluster cidr change.

    Mirrors the original short-circuit behavior: checking stops at the
    first changed value, since data_changed records the new value as a
    side effect.
    '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    for key, value in (('kube-api-servers', servers),
                       ('kube-dns', dns),
                       ('cluster-cidr', cluster_cidr)):
        if data_changed(key, value):
            set_state('kubernetes-worker.restart-needed')
            return
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.

    Renders kubeconfigs and snap configuration for kubelet/kube-proxy,
    restarts the services, and clears the restart-needed flag. Bails out
    (to retry on a later hook) while the CNI relation has not yet published
    a cluster cidr.
    '''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']
    if cluster_cidr is None:
        # CNI has not published its config yet; retry on a later hook.
        hookenv.log('Waiting for cluster cidr.')
        return
    creds = db.get('credentials')
    # Prime the data_changed cache so catch_change_in_creds only triggers
    # a restart on a *future* credential change.
    data_changed('kube-control.creds', creds)
    # set --allow-privileged flag for kubelet
    set_privileged()
    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    '''Publish worker-side settings on the CNI relation.

    Tells the CNI subordinate that this unit is a worker (not a master)
    and where its kubeconfig lives, so it can respond accordingly.
    '''
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    '''Drop the ingress.available flag whenever the ingress toggle flips,
    forcing render_and_launch_ingress to reconcile the new setting.'''
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''Restart the worker services after the Software Defined Network on
    the container changed, then clear the trigger flag.'''
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    '''Reconcile the ingress daemon set with the charm config.

    When the ingress option is enabled, launch the ingress load balancer
    and default http backend; otherwise delete both and close the web
    ports.
    '''
    if hookenv.config().get('ingress'):
        launch_default_ingress_controller()
        return
    hookenv.log('Deleting the http backend and ingress.')
    kubectl_manifest('delete',
                     '/root/cdk/addons/default-http-backend.yaml')
    kubectl_manifest('delete',
                     '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
    hookenv.close_port(80)
    hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
    '''Schedule a node relabel whenever the labels config option changes.'''
    set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node.

    Labels dropped from the config are removed from the node, new ones are
    applied, and the juju-application label is always (re)applied. Progress
    is checkpointed in the unit kv store after each kubectl call so a
    failed run resumes where it left off.
    '''
    # Get the user's configured labels. Guard against an unset option and
    # use partition (not split) so label *values* may themselves contain
    # '=' — consistent with parse_extra_args.
    config = hookenv.config()
    user_labels = {}
    for item in (config.get('labels') or '').split(' '):
        key, sep, val = item.partition('=')
        if sep:
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))
    # Collect the current label state.
    current_labels = db.get('current_labels') or {}
    # Remove any labels that the user has removed from the config.
    for key in list(current_labels.keys()):
        if key not in user_labels:
            try:
                remove_label(key)
                del current_labels[key]
                db.set('current_labels', current_labels)
            except ApplyNodeLabelFailed as e:
                hookenv.log(str(e))
                return
    # Add any new labels.
    for key, val in user_labels.items():
        try:
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)
        except ApplyNodeLabelFailed as e:
            hookenv.log(str(e))
            return
    # Set the juju-application label.
    try:
        set_label('juju-application', hookenv.service_name())
    except ApplyNodeLabelFailed as e:
        hookenv.log(str(e))
        return
    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args')
def extra_args_changed():
    '''Any change to the extra-args options requires a service restart.'''
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Set a flag to handle new docker login options.

    If docker daemon options have also changed, set a flag to ensure the
    daemon is restarted prior to running docker login.
    """
    set_state('kubernetes-worker.docker-login')
    config = hookenv.config()
    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
    """Login to a docker registry with configured credentials."""
    config = hookenv.config()
    logins = json.loads(config['docker-logins'])
    # Log out of any registry that was dropped from the config.
    previous_logins = config.previous('docker-logins')
    if previous_logins:
        old_servers = {entry['server']
                       for entry in json.loads(previous_logins)}
        new_servers = {entry['server'] for entry in logins}
        for server in old_servers - new_servers:
            subprocess.check_call(['docker', 'logout', server])
    # Log in to every configured registry.
    # NOTE(review): the password is passed on the command line and is
    # visible in the process table; consider --password-stdin.
    for login in logins:
        subprocess.check_call([
            'docker', 'login', login['server'],
            '-u', login['username'], '-p', login['password'],
        ])
    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg reports the primary package architecture as raw bytes; strip the
    # trailing newline and decode before returning.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.

    Writes kubeconfigs for the ubuntu user, root, kubelet and kube-proxy,
    all pointing at the given API server and authenticated with the
    matching token from the creds dict (client_token / kubelet_token /
    proxy_token).
    '''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    '''Parse a whitespace-separated "key=value" config option into a dict.

    Bare tokens without '=' map to the string 'true'.
    '''
    args = {}
    for token in hookenv.config().get(config_key, '').split():
        key, sep, value = token.partition('=')
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    '''Apply snap configuration for a kubernetes service.

    Args that were set on a previous invocation but are absent now are
    explicitly reset to 'null' so the snap drops them; base args are then
    overlaid with any user-supplied extra args. The applied arg set is
    remembered in unitdata for the next run.
    '''
    db = unitdata.kv()
    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}
    extra_args = parse_extra_args(extra_args_key)
    # Start from "null" for everything previously set, then overlay.
    args = {arg: 'null' for arg in prev_args}
    args.update(base_args)
    args.update(extra_args)
    cmd = ['snap', 'set', service] + ['%s=%s' % kv for kv in args.items()]
    check_call(cmd)
    db.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
    '''Render and apply kubelet snap configuration.

    dns is the kube-control dns dict (domain / sdn-ip / enable-kube-dns);
    ingress_ip becomes the kubelet node-ip.
    '''
    layer_options = layer.options('tls-client')
    kubelet_opts = {
        'require-kubeconfig': 'true',
        'kubeconfig': kubeconfig_path,
        'network-plugin': 'cni',
        'v': '0',
        'address': '0.0.0.0',
        'port': '10250',
        'cluster-domain': dns['domain'],
        'anonymous-auth': 'false',
        'client-ca-file': layer_options.get('ca_certificate_path'),
        'tls-cert-file': layer_options.get('server_certificate_path'),
        'tls-private-key-file': layer_options.get('server_key_path'),
        'logtostderr': 'true',
        'fail-swap-on': 'false',
        'node-ip': ingress_ip,
    }
    if dns['enable-kube-dns']:
        kubelet_opts['cluster-dns'] = dns['sdn-ip']
    if is_state('kubernetes-worker.privileged'):
        kubelet_opts['allow-privileged'] = 'true'
    else:
        kubelet_opts['allow-privileged'] = 'false'
    if is_state('kubernetes-worker.gpu.enabled'):
        # Kubernetes changed its GPU flag in 1.6.
        if get_version('kubelet') < (1, 6):
            hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
            kubelet_opts['experimental-nvidia-gpus'] = '1'
        else:
            hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
            kubelet_opts['feature-gates'] = 'Accelerators=true'
    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    '''Render and apply kube-proxy snap configuration.

    Picks one api server at random as the master, and disables per-core
    conntrack tuning when running inside an lxc container (where the
    kernel knobs are not writable).
    '''
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()

    # Invoke virt-what directly rather than via a shell: the command has
    # no shell syntax, and shell=False avoids PATH/quoting surprises.
    if b'lxc' in check_output(['virt-what']):
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    At least one authentication mechanism must be supplied: client
    key+certificate, password, or token. Token and password are mutually
    exclusive; ValueError is raised otherwise.

    NOTE(review): the kubectl command lines are whitespace-split; a path,
    password or token containing spaces would break the invocation --
    confirm callers never pass such values.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')
    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)
    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404).

    Renders the default-http-backend and ingress-daemon-set manifests
    (choosing arch-appropriate images when the config value is "" or
    "auto") and applies them with kubectl. Opens ports 80/443 on success;
    on a kubectl failure, logs, closes the ports and returns so a later
    update can retry.
    '''
    config = hookenv.config()
    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
            context['defaultbackend_image'] == "auto"):
        # Pick the image matching this machine's architecture.
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"
    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        # Pick the controller image matching this machine's architecture.
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        else:
            context['ingress_image'] = \
                "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15"  # noqa
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart the worker daemons (kube-proxy first, then kubelet).'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    service_restart('snap.kube-proxy.daemon')
    service_restart('snap.kubelet.daemon')
def get_kube_api_servers(kube_api):
    '''Return https endpoints for every unit on the kube-api relation.

    Each endpoint is rendered as https://<hostname>:<port>.
    '''
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: remove requested resources
        # immediately with --now.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists -- treat the create as a success
        # rather than erroring on a duplicate.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Any other operation (or a genuinely new create) runs as-is.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    '''Perform the one-time nagios check setup for this unit.'''
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    '''(Re)write the nagios service checks for the worker daemons.'''
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    nrpe.add_init_service_checks(
        nrpe_setup, services, nrpe.get_nagios_unit_name())
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    '''Drop the nagios checks when the nrpe relation goes away.'''
    remove_state('nrpe-external-master.initial-config')
    # Systemd services whose checks get removed. The current
    # nrpe-external-master interface doesn't handle much logic, so use
    # the charm-helpers code for now.
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for service in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Update the allow-privileged flag for kubelet.

    'auto' resolves to true exactly when GPU mode is enabled; the
    resulting value toggles the kubernetes-worker.privileged state.
    """
    privileged = hookenv.config('allow-privileged').lower()
    if privileged == 'auto':
        gpu = is_state('kubernetes-worker.gpu.enabled')
        privileged = 'true' if gpu else 'false'
    if privileged == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.

    A kubelet restart is required to pick up the new flag; clear the
    config-changed trigger so this only fires once per change.
    """
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.

    GPU mode requires privileged containers; when allow-privileged is
    explicitly "false" the unit only advertises that GPUs are available.
    Otherwise the node is labeled gpu/cuda and a kubelet restart is
    scheduled so configure_kubelet adds the GPU flags.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return
    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return
    # Apply node labels
    set_label('gpu', 'true')
    set_label('cuda', 'true')
    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')
    # Remove node labels
    for label in ('gpu', 'cuda'):
        remove_label(label)
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Advertise GPU availability to kubernetes-master over kube-control."""
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Tell kubernetes-master over kube-control that GPUs are unavailable."""
    kube_control.set_gpu(False)
@when('kube-control.connected')
@when('config.changed.kubelet-extra-args')
def maybe_request_new_credentials(kube_control):
    '''Re-request node credentials when the cloud-provider setting changes,
    since the node name (and hence the node user) can change with it.'''
    provider = parse_extra_args('kubelet-extra-args').get('cloud-provider', '')
    if data_changed('cloud_provider', provider):
        request_kubelet_and_proxy_credentials(kube_control)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface exists to support RBAC. Do the right thing
    # and request the hostname-based user even though it is only used once
    # RBAC is enabled.
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    if not creds or creds['user'] != nodeuser:
        return
    # Cache the credentials: if the master leader dies and is replaced,
    # the new leader has no recollection of our certs.
    db.set('credentials', creds)
    set_state('worker.auth.bootstrapped')
    if data_changed('kube-control.creds', creds):
        set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.
    """
    message = 'Relate {}:kube-control kubernetes-master:kube-control'.format(
        hookenv.service_name())
    hookenv.status_set('blocked', message)
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # -w 300: wait up to 300s for the xtables lock instead of failing.
    check_call(['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT'])
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
    '''Return the name kubernetes knows this node by.

    On AWS, nodes register under their FQDN; everywhere else the plain
    hostname is used.
    '''
    cloud = parse_extra_args('kubelet-extra-args').get('cloud-provider', '')
    return getfqdn() if cloud == 'aws' else gethostname()
class ApplyNodeLabelFailed(Exception):
    """Raised when a node label cannot be applied/removed within the
    retry window (see set_label / remove_label)."""
    pass
def persistent_call(cmd, retry_message):
    '''Run cmd repeatedly until it succeeds or a 3-minute deadline expires.

    Returns True on a zero exit status, False once the deadline passes.
    Logs retry_message and sleeps one second between attempts.
    '''
    deadline = time.time() + 180
    while time.time() < deadline:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    '''Apply (or overwrite) a label on this node via kubectl.

    Raises ApplyNodeLabelFailed when the label cannot be applied within
    persistent_call's retry window.
    '''
    cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'.format(
        kubeconfig_path, get_node_name(), label, value).split()
    retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
def remove_label(label):
    '''Strip a label from this node via kubectl (the trailing "-" syntax).

    Raises ApplyNodeLabelFailed when the label cannot be removed within
    persistent_call's retry window.
    '''
    cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'.format(
        kubeconfig_path, get_node_name(), label).split()
    retry = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
|
squeed/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 37,815
|
[
"CDK"
] |
25096878ae938c16493fca0a8560cb18f0eb2cab5f1bc4a6c2a6fe572923a0ba
|
# antlr4_tex2sym.py Author: Akira Hakuta, Date: 2019/04/05
# python.exe antlr4_tex2sym.py
import sys
import antlr4
from gen.TeX2SymLexer import TeX2SymLexer
from gen.TeX2SymParser import TeX2SymParser
from gen.TeX2SymVisitor import TeX2SymVisitor
from sympy import *
# variable : a,b,...,z, A,B,..,Z, \\alpha, \\beta,..,\pi,... , \\omega (except E, I, N, O, S, zeta, omicron)
# math constant : pi; --> \\ppi, i --> \\ii, e --> \\ee
# LaTeX Code Style: \\sin{x}, \\log\_{2}{8}, \\sum\_{k=1}\^{n}{k(k+1)\^2},...
class LaTeX2SymPyVisitor(TeX2SymVisitor):
def visitPrintExpr(self, ctx):
value = self.visit(ctx.expr())
return value
def visitInt(self, ctx):
return ctx.INT().getText()
def visitFloat(self, ctx):
float_str=ctx.FLOAT().getText()
return 'nsimplify({:s})'.format(float_str)
def visitAlphabet(self, ctx):
return ctx.ALPHABET().getText()
def visitGreek(self, ctx):
return ctx.GREEK().getText()
def visitMult(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
return '{:s}*{:s}'.format(left,right)
def visitDiv(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
return '{:s}*{:s}**(-1)'.format(left,right)
def visitMull(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
return '{:s}*{:s}'.format(left,right)
def visitAddSub(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
if ctx.op.type == TeX2SymParser.PLUS:
return '{:s}+{:s}'.format(left,right)
else:
return '{:s}-{:s}'.format(left,right)
def visitCs_parens(self, ctx):
expr = self.visit(ctx.expr())
return '({:s})'.format(expr)
def visitParens(self, ctx):
expr = self.visit(ctx.expr())
return '({:s})'.format(expr)
def visitCs_bs_braces(self, ctx):
expr = self.visit(ctx.expr())
return '({:s})'.format(expr)
def visitBs_braces(self, ctx):
expr = self.visit(ctx.expr())
return '({:s})'.format(expr)
def visitBraces(self, ctx):
expr = self.visit(ctx.expr())
return '({:s})'.format(expr)
def visitPower(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
return '{:s}**{:s}'.format(left,right)
def visitFactrial(self, ctx):
expr = self.visit(ctx.expr())
return 'factorial({:s})'.format(expr)
def visitFunc(self, ctx):
expr = self.visit(ctx.expr())
if ctx.func.type == TeX2SymParser.SQRT:
result='sqrt({:s})'.format(expr)
elif ctx.func.type == TeX2SymParser.SIN:
result='sin({:s})'.format(expr)
elif ctx.func.type == TeX2SymParser.COS:
result='cos({:s})'.format(expr)
elif ctx.func.type == TeX2SymParser.TAN:
result='tan({:s})'.format(expr)
elif ctx.func.type == TeX2SymParser.LOG:
result='log({:s})'.format(expr)
return result
def visitSqrtn(self, ctx):
expr1 = self.visit(ctx.expr(0))
expr2 = self.visit(ctx.expr(1))
return '(({:s})**(({:s})**(-1)))'.format(expr2,expr1)
def visitLogub(self, ctx):
expr0 = self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
return 'log({})*(log({})**(-1))'.format(expr1,expr0)
def visitAbs(self, ctx):
expr = self.visit(ctx.expr())
return 'Abs({})'.format(expr)
def visitTrign(self, ctx):
expr1 = self.visit(ctx.expr(0))
expr2 = self.visit(ctx.expr(1))
if ctx.func.type == TeX2SymParser.SIN:
result='(sin({:s}))**({:s})'.format(expr2,expr1)
elif ctx.func.type == TeX2SymParser.COS:
result='(cos({:s}))**({:s})'.format(expr2,expr1)
elif ctx.func.type == TeX2SymParser.TAN:
result='(tan({:s}))**({:s})'.format(expr2,expr1)
return result
def visitFrac(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
return '({:s})*({:s})**(-1)'.format(left, right)
def visitSum(self, ctx):
expr0= self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
expr2 = self.visit(ctx.expr(2))
expr3 = self.visit(ctx.expr(3))
return 'summation({:s},({:s},{:s},{:s}))'.format(expr3,expr0,expr1,expr2)
def visitDiff(self, ctx):
expr = self.visit(ctx.expr())
if ctx.dxg.type == TeX2SymParser.DX:
symb = ctx.DX().getText()[1]
elif ctx.dxg.type == TeX2SymParser.DGREEK:
symb = ctx.DGREEK().getText()[1:]
return 'diff({:s},{:s})'.format(expr,symb)
def visitDiffn1(self, ctx):
expr0 = self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
if ctx.dxg.type == TeX2SymParser.DX:
symb = ctx.DX().getText()[1]
elif ctx.dxg.type == TeX2SymParser.DGREEK:
symb = ctx.DGREEK().getText()[1:]
return 'diff({:s},{:s},{:s})'.format(expr1,symb,expr0)
def visitDiffn2(self, ctx):
expr0 = self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
expr2 = self.visit(ctx.expr(2))
if expr0 != expr1:
return None
if ctx.dxg.type == TeX2SymParser.DX:
symb = ctx.DX().getText()[1]
elif ctx.dxg.type == TeX2SymParser.DGREEK:
symb = ctx.DGREEK().getText()[1:]
return 'diff({:s},{:s},{:s})'.format(expr2,symb,expr0)
def visitIntegrate(self, ctx):
expr = self.visit(ctx.expr())
if ctx.dxg.type == TeX2SymParser.DX:
symb = ctx.DX().getText()[1]
elif ctx.dxg.type == TeX2SymParser.DGREEK:
symb = ctx.DGREEK().getText()[1:]
return 'integrate({:s},{:s})'.format(expr,symb)
def visitDintegrate(self, ctx):
expr0 = self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
expr2 = self.visit(ctx.expr(2))
if ctx.dxg.type == TeX2SymParser.DX:
symb = ctx.DX().getText()[1]
elif ctx.dxg.type == TeX2SymParser.DGREEK:
symb = ctx.DGREEK().getText()[1:]
return 'integrate({:s},({:s},{:s},{:s}))'.format(expr2,symb,expr0,expr1)
def visitLim(self, ctx):
expr0 = self.visit(ctx.expr(0))
expr1 = self.visit(ctx.expr(1))
expr2 = self.visit(ctx.expr(2))
return 'limit({:s}, {:s}, {:s})'.format(expr2,expr0,expr1)
def visitCombi_permu(self, ctx):
left = self.visit(ctx.expr(0))
right = self.visit(ctx.expr(1))
if ctx.cp.type == TeX2SymParser.COMBI:
result='binomial({:s},{:s})'.format(left,right)
elif ctx.cp.type == TeX2SymParser.PERMU:
result='ff({:s},{:s})'.format(left,right)
return result
def visitSeqterm(self, ctx):
    """Translate a sequence term into the generic F(...) form."""
    return 'F({})'.format(self.visit(ctx.expr()))
def visitFunction(self, ctx):
    """Translate a function application into the generic f(...) form."""
    return 'f({})'.format(self.visit(ctx.expr()))
def visitGammaf_zetaf(self, ctx):
    """Translate \\Gamma / \\zeta into SymPy gamma() / zeta() calls."""
    arg = self.visit(ctx.expr())
    names = {TeX2SymParser.GAMMAF: 'gamma', TeX2SymParser.ZETAF: 'zeta'}
    return '{}({})'.format(names[ctx.gz.type], arg)
def visitPlusExpr(self, ctx):
    """A unary plus is a no-op; return the operand's translation unchanged."""
    return self.visit(ctx.expr())
def visitMinusExpr(self, ctx):
    """Unary minus: emit the operand multiplied by (-1)."""
    return '(-1)*' + self.visit(ctx.expr())
def visitMathconst(self, ctx):
    """Map a TeX math constant token to its SymPy spelling."""
    spellings = {
        TeX2SymParser.PI: 'S.Pi',
        TeX2SymParser.IMAGINARY_UNIT: 'S.ImaginaryUnit',
        TeX2SymParser.NAPIER_CONSTANT: 'S.Exp1',
        TeX2SymParser.INFTY: 'oo',
    }
    return spellings[ctx.const.type]
def visitEqual(self, ctx):
    """Translate lhs = rhs into SymPy's Eq(lhs, rhs)."""
    lhs = self.visit(ctx.expr(0))
    rhs = self.visit(ctx.expr(1))
    return 'Eq({:s},{:s})'.format(lhs, rhs)
def visitRelation(self, ctx):
    """Translate an inequality into its Python operator form."""
    left = self.visit(ctx.expr(0))
    right = self.visit(ctx.expr(1))
    operators = {
        TeX2SymParser.GT: '>',
        TeX2SymParser.LT: '<',
        TeX2SymParser.GEQQ: '>=',
        TeX2SymParser.LEQQ: '<=',
    }
    op = operators.get(ctx.op.type)
    if op is None:
        # unrecognised relation token: same implicit None as the original
        return None
    return '{:s}{:s}{:s}'.format(left, op, right)
# LaTeX greek commands paired with doubled-letter aliases. tex2sym() replaces
# each command with its alias (a plain identifier the parser/SymPy can handle)
# and mylatex() maps the aliases back to LaTeX.
# NOTE(review): '\pi' maps to 'pppi' (triple p) — presumably to avoid clashing
# with the '\ppi' shorthand handled in mylatexstyle(); confirm before changing.
greek_list = [['\\alpha', 'aalpha'], ['\\beta', 'bbeta'], ['\\gamma', 'ggamma'], ['\\delta', 'ddelta'], ['\\epsilon', 'eepsilon'],
    ['\\eta', 'eeta'], ['\\theta', 'ttheta'], ['\\iota', 'iiota'], ['\\kappa', 'kkappa'], ['\\lambda', 'llambda'], ['\\mu', 'mmu'],
    ['\\nu', 'nnu'], ['\\xi', 'xxi'], ['\\omicron', 'oomicron'], ['\\pi', 'pppi'], ['\\rho', 'rrho'], ['\\sigma', 'ssigma'],
    ['\\tau', 'ttau'], ['\\upsilon', 'uupsilon'], ['\\phi', 'pphi'], ['\\chi', 'cchi'], ['\\psi', 'ppsi'], ['\\omega', 'oomega']]
def tex2sym(texexpr):
    """Translate a LaTeX math expression into SymPy source text.

    Returns '' for empty input. Greek commands are first replaced by
    identifier-safe aliases (see greek_list) so the lexer treats them
    as plain names.
    """
    if texexpr == '':
        return ''
    for tex_name, alias in greek_list:
        texexpr = texexpr.replace(tex_name, alias)
    stream = antlr4.InputStream(texexpr + '\n')
    token_stream = antlr4.CommonTokenStream(TeX2SymLexer(stream))
    tree = TeX2SymParser(token_stream).prog()
    return LaTeX2SymPyVisitor().visit(tree)
def mylatex(sympyexpr):
    """latex() wrapper that converts greek aliases back to LaTeX commands.

    A trailing space is appended after each restored command so it does
    not fuse with the following token.
    """
    texexpr = latex(sympyexpr)
    for tex_name, alias in greek_list:
        texexpr = texexpr.replace(alias, tex_name + ' ')
    return texexpr
def mylatexstyle(texexpr):
    """Rewrite this project's shorthand commands into standard LaTeX.

    \\ii -> i, \\ee -> e, \\ppi -> \\pi, \\C -> \\mathrm{C}, \\P -> \\mathrm{P}.
    """
    substitutions = [
        ('\\ii', ' i '),
        ('\\ee', ' e '),
        ('\\ppi', '\\pi '),
        ('\\C', '\\mathrm{C}'),
        ('\\P', '\\mathrm{P}'),
    ]
    for shorthand, replacement in substitutions:
        texexpr = texexpr.replace(shorthand, replacement)
    return texexpr
def test(texexpr):
    """Print a LaTeX sample next to its SymPy translation (demo helper)."""
    escaped = texexpr.replace('\\', '\\\\')
    print(escaped + ' --> ' + tex2sym(texexpr))
if __name__ == '__main__':
    # quick self-demo: print each LaTeX sample alongside its SymPy translation
    print('tex2sym: LaTeX math expression --> SymPy form')
    test('-2-3+4')
    test('2\\times3^4')
    test('0.5 \\times 3 \\div 5a\\cdot 4')
    test('2\\times3!')
    test('2ab^2(x+y)^3')
    test('\\sqrt{3x}')
    test('\\frac{2}{3}a')
    test('\\dfrac{2}{3}a')
    test('\\sin {\\ppi x}')
    test('\\log{\\ee^3}')
    test('\\frac{d}{dx}{x^5}')
    test('\\int{\\sin^{2}{\\theta} d\\theta}')
    test('\\sum_{k=1}^{n}{k^3}')
    test('2x^2+3x+4=0')
    test('3x^2-4x+5 \\geqq 0')
    test('\\frac{d^{2}}{dx^{2}}{f(x)}=-f(x)')
    test('\\alpha\\beta\\gamma\\delta\\epsilon\\eta\\theta\\iota\\kappa\\lambda\\mu\\nu\\xi\\pi\\rho\\sigma\\tau\\upsilon\\phi\\chi\\psi\\omega\\ppi')
    test('(a\\!aa\\,a\\:a\\;a~a)^3')
    test('\\{\\dfrac{1}{~2~}a-(\\dfrac{1}{~3~}b-\\dfrac{1}{~4~}c)\\}^2')
    test(r'\left\{\dfrac{1}{~2~}a-\left(\dfrac{1}{~3~}b-\dfrac{1}{~4~}c\right)\right\}^2')
|
AkiraHakuta/antlr4_tex2sym
|
antlr4_tex2sym.py
|
Python
|
mit
| 11,716
|
[
"VisIt"
] |
2c102f54a7754a5fd28650ad50a650d172c4ced20dcd799b86c6ee4d69c29e60
|
import fokabot
import random
import glob
import serverPackets
import exceptions
import userHelper
import time
import systemHelper
"""
Commands callbacks
Must have fro, chan and message as arguments
fro -- name of who triggered the command
chan -- channel where the message was sent
message -- list containing arguments passed from the message
[0] = first argument
[1] = second argument
. . .
return the message or **False** if there's no response by the bot
"""
def faq(fro, chan, message):
    """Reply to "!faq <topic>" with a canned answer.

    fro -- name of who triggered the command (unused)
    chan -- channel where the message was sent (unused)
    message -- argument list; message[0] is the FAQ topic

    Returns the response string, or None (no bot reply) for an unknown
    or missing topic.
    """
    # The original if/elif chain had the "rules" branch duplicated (the
    # second copy was unreachable) and crashed with IndexError when no
    # topic was given; a dict lookup fixes both.
    answers = {
        "rules": "Please make sure to check (Ripple's rules)[http://ripple.moe/?p=23].",
        "swearing": "Please don't abuse swearing",
        "spam": "Please don't spam",
        "offend": "Please don't offend other players",
        "github": "(Ripple's Github page!)[https://github.com/osuripple/ripple]",
        "discord": "(Join Ripple's Discord!)[https://discord.gg/0rJcZruIsA6rXuIx]",
        "blog": "You can find the latest Ripple news on the (blog)[https://ripple.moe/blog/]!",
        "changelog": "Check the (changelog)[https://ripple.moe/index.php?p=17] !",
        "status": "Check the server status (here!)[https://ripple.moe/index.php?p=27]",
    }
    if not message:
        return None
    return answers.get(message[0])
def roll(fro, chan, message):
    """Reply to "!roll [max]" with a random score in [0, max).

    The limit defaults to 100 and is overridden by a positive integer
    first argument.
    """
    limit = 100
    if len(message) >= 1 and message[0].isdigit() and int(message[0]) > 0:
        limit = int(message[0])
    points = random.randrange(0, limit)
    return "{} rolls {} points!".format(fro, points)
def ask(fro, chan, message):
    """Answer any "!ask" question with a random yes/no/maybe."""
    options = ["yes", "no", "maybe"]
    return random.choice(options)
def alert(fro, chan, message):
    """Broadcast the given text as a notification to every online client.

    Returns False: the bot itself sends no chat reply.
    """
    text = ' '.join(message[:])
    glob.tokens.enqueueAll(serverPackets.notification(text))
    return False
def moderated(fro, chan, message):
    """Toggle moderated mode on a channel: "!moderated [off]"."""
    try:
        # PMs have no channel to moderate; real channel names start with "#"
        if not chan.startswith("#"):
            raise exceptions.moderatedPMException
        # default is on; an explicit first argument of "off" disables it
        enable = not (len(message) >= 1 and message[0] == "off")
        glob.channels.channels[chan].moderated = enable
        return "This channel is {} in moderated mode!".format("now" if enable else "no longer")
    except exceptions.moderatedPMException:
        return "You are trying to put a private chat in moderated mode. Are you serious?!? You're fired."
def kickAll(fro, chan, message):
    """Kick every connected user below mod rank (rank < 3)."""
    # collect targets first: the tokens dict must not change size while
    # we iterate over it
    victims = [key for key, value in glob.tokens.tokens.items() if value.rank < 3]
    for token_id in victims:
        if token_id in glob.tokens.tokens:
            glob.tokens.tokens[token_id].kick()
    return "Whoops! Rip everyone."
def kick(fro, chan, message):
    """Kick a single user: "!kick <target>" (underscores stand for spaces)."""
    target = message[0].replace("_", " ")
    token = glob.tokens.getTokenFromUsername(target)
    if token is None:
        return "{} is not online".format(target)
    token.kick()
    return "{} has been kicked from the server.".format(target)
def fokabotReconnect(fro, chan, message):
    """Reconnect FokaBot (user id 999) to bancho if it is offline.

    Returns False when a reconnect was started (no chat reply needed).
    """
    if glob.tokens.getTokenFromUserID(999) is not None:
        return "Fokabot is already connected to Bancho"
    fokabot.connect()
    return False
def silence(fro, chan, message):
    """Silence a user: "!silence <target> <amount> <unit(s/m/h/d)> <reason>".

    fro -- name of who triggered the command (unused)
    chan -- channel where the message was sent (unused)
    message -- [target, amount, unit, reason...]

    Returns the chat response describing the outcome.
    """
    # NOTE: the original started with "for i in message: i = i.lower()",
    # which is a no-op (rebinding the loop variable never changes the
    # list). Only the unit actually needs normalising, so lowercase just
    # that; the target's case is preserved for the username lookup.
    target = message[0].replace("_", " ")
    amount = message[1]
    unit = message[2].lower()
    reason = ' '.join(message[3:])
    # Make sure the user exists
    targetUserID = userHelper.getID(target)
    if targetUserID == False:
        return "{}: user not found".format(target)
    # Calculate silence seconds from the unit
    multipliers = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    if unit not in multipliers:
        return "Invalid time unit (s/m/h/d)."
    silenceTime = int(amount) * multipliers[unit]
    # Max silence time is 7 days
    if silenceTime > 604800:
        return "Invalid silence time. Max silence time is 7 days."
    # Update silence end in db
    endTime = int(time.time()) + silenceTime
    userHelper.silence(targetUserID, endTime, reason)
    # Send silence packet to target if they're connected
    targetToken = glob.tokens.getTokenFromUsername(target)
    if targetToken is not None:
        targetToken.enqueue(serverPackets.silenceEndTime(silenceTime))
    return "{} has been silenced for the following reason: {}".format(target, reason)
def removeSilence(fro, chan, message):
    """Lift a user's silence: "!removesilence <target>".

    Returns the chat response describing the outcome.
    """
    # NOTE: the original began with "for i in message: i = i.lower()",
    # which does nothing (rebinding the loop variable never mutates the
    # list); the dead loop has been removed.
    target = message[0].replace("_", " ")
    # Make sure the user exists
    targetUserID = userHelper.getID(target)
    if targetUserID == False:
        return "{}: user not found".format(target)
    # Reset user silence time and reason in db
    userHelper.silence(targetUserID, 0, "")
    # Send new silence end packet to the user if they're online
    targetToken = glob.tokens.getTokenFromUsername(target)
    if targetToken is not None:
        targetToken.enqueue(serverPackets.silenceEndTime(0))
    return "{}'s silence reset".format(target)
def restartShutdown(restart):
    """Schedule a graceful pep.py restart (restart=True) or shutdown in 5 seconds."""
    action = "restart" if restart else "shutdown"
    msg = "We are performing some maintenance. Bancho will {} in 5 seconds. Thank you for your patience.".format(action)
    systemHelper.scheduleShutdown(5, restart, msg)
    return msg
def systemRestart(fro, chan, message):
    # "!system restart": restart pep.py in 5 seconds (see restartShutdown)
    return restartShutdown(True)
def systemShutdown(fro, chan, message):
    # "!system shutdown": stop pep.py in 5 seconds (see restartShutdown)
    return restartShutdown(False)
def systemReload(fro, chan, message):
    """Reload bancho settings and channels, then push the new state to clients."""
    glob.banchoConf.loadSettings()
    glob.channels.loadChannels()
    # resend the bottom menu icon and the full channel list to everyone
    glob.tokens.enqueueAll(serverPackets.mainMenuIcon(glob.banchoConf.config["menuIcon"]))
    glob.tokens.enqueueAll(serverPackets.channelInfoEnd())
    for name in glob.channels.channels:
        glob.tokens.enqueueAll(serverPackets.channelInfo(name))
    return "Bancho settings reloaded!"
def systemMaintenance(fro, chan, message):
    """Toggle bancho maintenance mode: "!system maintenance [off]"."""
    # default is on; "!system maintenance off" turns it off
    maintenance = not (len(message) >= 2 and message[1] == "off")
    # persist the new value in the bancho_settings table
    glob.banchoConf.setMaintenance(maintenance)
    if not maintenance:
        return "The server is no longer in maintenance mode!"
    # maintenance turned on: disconnect everyone below mod rank (rank < 3)
    who = [value.userID for _, value in glob.tokens.tokens.items() if value.rank < 3]
    glob.tokens.enqueueAll(serverPackets.notification("Our bancho server is in maintenance mode. Please try to login again later."))
    glob.tokens.multipleEnqueue(serverPackets.loginError(), who)
    return "The server is now in maintenance mode!"
def systemStatus(fro, chan, message):
    """Build a multi-line report of server, bancho and system statistics."""
    data = systemHelper.getSystemInfo()
    lines = [
        "=== PEP.PY STATS ===",
        "Running pep.py server",
        "Webserver: {}".format(data["webServer"]),
        "",
        "=== BANCHO STATS ===",
        "Connected users: {}".format(data["connectedUsers"]),
        "",
        "=== SYSTEM STATS ===",
        "CPU: {}%".format(data["cpuUsage"]),
        "RAM: {}GB/{}GB".format(data["usedMemory"], data["totalMemory"]),
    ]
    # load averages are only reported on unix hosts
    if data["unix"] == True:
        lines.append("Load average: {}/{}/{}".format(
            data["loadAverage"][0], data["loadAverage"][1], data["loadAverage"][2]))
    # every line, including the last, is newline-terminated
    return "\n".join(lines) + "\n"
"""
Commands list
trigger: message that triggers the command
callback: function to call when the command is triggered. Optional.
response: text to return when the command is triggered. Optional.
syntax: command syntax. Arguments must be separated by spaces (eg: <arg1> <arg2>)
minRank: minimum rank to execute that command. Optional (default = 1)
You MUST set trigger and callback/response, or the command won't work.
"""
commands = [
{
"trigger": "!roll",
"callback": roll
}, {
"trigger": "!faq",
"syntax": "<name>",
"callback": faq
}, {
"trigger": "!report",
"response": "Report command isn't here yet :c"
}, {
"trigger": "!help",
"response": "Click (here)[https://ripple.moe/index.php?p=16&id=4] for FokaBot's full command list"
}, {
"trigger": "!ask",
"syntax": "<question>",
"callback": ask
}, {
"trigger": "!mm00",
"response": random.choice(["meme", "MA MAURO ESISTE?"])
}, {
"trigger": "!alert",
"syntax": "<message>",
"minRank": 4,
"callback": alert
}, {
"trigger": "!moderated",
"minRank": 3,
"callback": moderated
}, {
"trigger": "!kickall",
"minRank": 4,
"callback": kickAll
}, {
"trigger": "!kick",
"syntax": "<target>",
"minRank": 3,
"callback": kick
}, {
"trigger": "!fokabot reconnect",
"minRank": 3,
"callback": fokabotReconnect
}, {
"trigger": "!silence",
"syntax": "<target> <amount> <unit(s/m/h/d)> <reason>",
"minRank": 3,
"callback": silence
}, {
"trigger": "!removesilence",
"syntax": "<target>",
"minRank": 3,
"callback": removeSilence
}, {
"trigger": "!system restart",
"minRank": 4,
"callback": systemRestart
}, {
"trigger": "!system shutdown",
"minRank": 4,
"callback": systemShutdown
}, {
"trigger": "!system reload",
"minRank": 3,
"callback": systemReload
}, {
"trigger": "!system maintenance",
"minRank": 3,
"callback": systemMaintenance
}, {
"trigger": "!system status",
"minRank": 3,
"callback": systemStatus
}
]
# Commands list default values
for cmd in commands:
cmd.setdefault("syntax", "")
cmd.setdefault("minRank", 1)
cmd.setdefault("callback", None)
cmd.setdefault("response", "u w0t m8?")
|
osuripple/ripple
|
c.ppy.sh/fokabotCommands.py
|
Python
|
mit
| 10,048
|
[
"MOE"
] |
f61dadfdb47a54e742afd9a8ae59a0e9cadef9d3dfc732207e73d60e313be689
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A couple of helper functions for doing numpy-like stuff with numpy.
"""
from mathics.core.expression import Expression
from functools import reduce
import numpy
import ast
import inspect
import sys
#
# INTERNAL FUNCTIONS
#
def _promote(x, shape):
if isinstance(x, (int, float)):
data = numpy.ndarray(shape)
data.fill(x)
return data
else:
return x
def _is_scalar(x):
return not isinstance(x, (list, tuple, numpy.ndarray))
#
# ARRAY CREATION AND REORGANIZATION: STACK, UNSTACK, CONCAT, ...
#
def array(a):
    """Coerce `a` (list, tuple, scalar or ndarray) into a new numpy array."""
    return numpy.array(a)
def unstack(a):
    """Split `a` along its innermost axis into a leading axis of components.

    Inverse of stack(): the innermost axis becomes the outermost one, and
    a now-trivial trailing axis of length 1 is squeezed away.
    """
    a = array(a)
    parts = numpy.split(a, a.shape[-1], axis=-1)
    out = array(parts)
    if out.shape[-1] == 1:
        out = out.reshape(out.shape[:-1])
    return out
def stack(*a):
    """Stack the given arrays along the innermost axis.

    e.g. stack([1, 2], [3, 4]) -> [[1, 3], [2, 4]], and
    stack([[1, 2], [3, 4]], [[4, 5], [6, 7]]) ->
    [[[1, 4], [2, 5]], [[3, 6], [4, 7]]].
    """
    pieces = [array(x) for x in a]
    return numpy.stack(pieces, axis=-1)
def stacked(f, a):
    """Apply f to the unstacked components of `a` and restack its result.

    A 1-dimensional input is temporarily wrapped into a single-row array
    so f always sees component arrays, then unwrapped again.
    """
    a = array(a)
    unwrap = len(a.shape) == 1
    if unwrap:
        a = array([a])
    out = stack(*f(*unstack(a)))
    return out[0] if unwrap else out
def concat(*a):
    """Concatenate the given arrays along the innermost axis, skipping empty ones."""
    arrays = [array(x) for x in a]
    non_empty = [x for x in arrays if x.shape[0]]
    return numpy.concatenate(non_empty, axis=-1)
def vectorize(a, depth, f):
    # numpy implementation: f is applied directly since numpy operations are
    # already vectorized. `depth` is ignored here — presumably the no-numpy
    # twin of this module recurses `depth` levels instead (TODO confirm).
    return f(a)
#
# MATHEMATICAL OPERATIONS
#
def clip(a, t0, t1):
    """Clamp every element of `a` into the interval [t0, t1]."""
    return numpy.clip(array(a), t0, t1)
def dot_t(u, v):
    """Matrix product of `u` with the transpose of `v`."""
    return numpy.dot(array(u), array(v).T)
def mod(a, b):
    """Element-wise a mod b (numpy sign convention: result follows b's sign)."""
    return numpy.mod(a, b)
def sin(a):
    """Element-wise sine of `a` (coerced to a numpy array)."""
    return numpy.sin(array(a))
def cos(a):
    """Element-wise cosine of `a` (coerced to a numpy array)."""
    return numpy.cos(array(a))
def arctan2(y, x):
    """Element-wise two-argument arctangent of y/x (quadrant-aware)."""
    return numpy.arctan2(array(y), array(x))
def sqrt(a):
    """Element-wise square root of `a` (coerced to a numpy array)."""
    return numpy.sqrt(array(a))
def floor(a):
    """Element-wise floor of `a` (coerced to a numpy array)."""
    return numpy.floor(array(a))
def maximum(*a):
    """Element-wise maximum across all given arrays."""
    return reduce(numpy.maximum, [array(x) for x in a])
def minimum(*a):
    """Element-wise minimum across all given arrays."""
    return reduce(numpy.minimum, [array(x) for x in a])
#
# PUBLIC HELPER FUNCTIONS
#
def is_numpy_available():
    # this module is the numpy implementation, so numpy is available by
    # construction; always True here
    return True
def allclose(a, b):
    """True if `a` and `b` are element-wise equal within numpy's default tolerance."""
    return numpy.allclose(array(a), array(b))
def errstate(**kwargs):
    """Context manager controlling numpy floating-point error handling (see numpy.errstate)."""
    return numpy.errstate(**kwargs)
def instantiate_elements(a, new_element, d=1):
    """Mirror numpy array `a` as nested Mathics List expressions.

    Recurses through the outer axes of `a` and calls `new_element` on
    each sub-array once dimension `d` is reached, so the returned
    Expression tree has the same shape as `a`.
    """
    if len(a.shape) == d:
        leaves = [new_element(x) for x in a]
    else:
        leaves = [instantiate_elements(sub, new_element, d) for sub in a]
    return Expression('List', *leaves)
#
# CONDITIONALS AND PROGRAM FLOW
#
# @conditional is an annotation that basically invoked a mini compiler in order to compile numpy
# conditional expressions from code that would run as regular Python code in the no-numpy case.
# the idea is to have readable Python code that states what happens in both numpy and no-numpy
# cases. the alternative would be using wrapper functions that are kind of ugly and hard to read
# for the numpy case.
#
# as an example, take the following code:
#
# @conditional
# def f(a):
# if a > 10:
# return a + 1.5
# elif a > 9:
# return 7
# else:
# return 2
#
# if numpy is not available, f() will just take one scalar and work as expected. if numpy is
# available, @conditional will recompile the function into something that allows "a" (or any
# other parameters of f) to be numpy arrays.
#
# this is necessary, as for numpy arrays, there is no single execution branch in conditionals
# as above, since the conditional has to be evaluated for each element of the numpy array.
# internally, in the numpy case, the function above will be transformed to something like:
#
# a[a > 10] = a + 1.5
# ...
#
# in general, and to make this transformation as simple as possible, @conditional expects the
# function that is annotated with it to be of the following restrained form:
#
# if [binary comparisons or variable]:
# return [expression1]
# elif [binary comparisons or variable]: # optional
# return [expression2]
# else: # optional
# return [expression3]
#
# all relevant rules for @conditional functions are:
# - all "if" branches must exit immediately with "return".
# - "if"s must rely on simple binary comparisons, e.g. "b < 4" or "4 > b", or variables
#   - the occurrence of "elif" is optional, as is the occurrence of "else"
# - if "else" is not provided, the provided "if" cases must cover all possible cases,
# otherwise there will be undefined results.
# - code in @conditional must not reference global variables.
#
# if a function does not adhere to the rules described above, a MalformedConditional is thrown when
# constructing the @vectorized function.
def choose(i, *options):
    """Select per-column elements from `options` by index `i`.

    Each option is a sequence of `dim` columns; returns a list of `dim`
    picked values. `i` may be a scalar index or an array of indices
    (resolved per element via numpy.choose).
    """
    assert options
    dim = len(options[0])
    columns = [[option[d] for option in options] for d in range(dim)]
    if isinstance(i, (int, float)):
        k = int(i)  # int cast needed for PyPy
        return [column[k] for column in columns]
    else:
        assert len(options) < 256  # indices are stored as uint8
        i_int = array(i).astype(numpy.uint8)
        return [numpy.choose(i_int, column) for column in columns]
# sentinel stored in a path's test slot to mark the trailing "else" branch
_else_case_id = 'else_case'
def _numpy_conditional(shape_id, *paths):
    # called during runtime when we actually evaluate a conditional.
    # shape_id: the variable from the first "if" test; when it is an array its
    # shape defines the result's shape.
    # paths: one (test, args, comp) tuple per branch, where test is a zero-arg
    # callable (or _else_case_id for the "else" branch), args are the values
    # the branch body reads, and comp evaluates the branch body.
    if _is_scalar(shape_id):
        # scalar case: ordinary short-circuit evaluation, first true branch wins
        for test, args, comp in paths:
            if test == _else_case_id or test():
                return comp(*args)
        assert False  # one case must be true
    shape = shape_id.shape
    result = numpy.ndarray(shape)
    has_else = paths[-1][0] == _else_case_id
    if_paths = paths[:-1] if has_else else paths
    # evaluate every branch condition as a boolean mask over the array
    masks = [path[0]() for path in if_paths]
    def efficient_masks():
        # rewrite the masks so each one excludes elements already claimed by
        # an earlier branch (mirrors if/elif short-circuiting)
        rest = ~masks[0]
        yield masks[0]
        for mask in masks[1:]:
            yield rest & mask
            rest &= ~mask
    if has_else:
        if len(paths) == 2:  # simple case: just 1 if, and 1 else
            masks.append(~masks[0])
        else:  # n ifs, and 1 else
            masks.append(numpy.ones(masks[0].shape, dtype=bool))
            masks = list(efficient_masks())
    else:
        masks = list(efficient_masks())
    # process in reverse order in order to reflect order that is written
    # down in Python code. for example, writing
    #
    # if a > 5:
    #     b = True
    # elif a > 4:
    #     b = False
    #
    # in Python, will always set b to True, if a > 5. if we evaluated the
    # statements above in non-reversed order for numpy, for some element
    # a > 5, we would first set b = True, and then b = False, which would
    # be wrong.
    for mask, path in zip(reversed(masks), reversed(paths)):
        test, args, comp = path
        # scalars broadcast as-is; arrays are narrowed to the branch's elements
        result[mask] = comp(*[x if _is_scalar(x) else x[mask] for x in args])
    return result
class MalformedConditional(Exception):
    """Raised when a @conditional function violates the restricted if/elif/else form."""

    def __init__(self, func, node, error):
        # include the function name and the offending AST node's line number
        message = 'in function %s in line %d: %s' % (func.__name__, node.lineno, error)
        Exception.__init__(self, message)
class _NameCollector(ast.NodeVisitor):
def __init__(self):
self.names = set()
def visit_Name(self, node):
assert isinstance(node.ctx, ast.Load)
self.names.add(node.id)
def _create_ast_lambda(names, body):
if sys.version_info >= (3, 0): # change in AST structure for Python 3
args = [ast.arg(arg=name, annotation=None) for name in names]
else:
args = [ast.Name(id=name, ctx=ast.Load()) for name in names]
return ast.Lambda(args=ast.arguments(
args=args, vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]), body=body)
def _expression_lambda(node):
    """Wrap an expression AST in a lambda over all names it reads.

    e.g. for b + a * 2 + b * 7 this builds lambda a, b: b + a * 2 + b * 7.
    Returns a pair of AST nodes: (tuple of the variable Names, the lambda
    that takes values for those variables and evaluates the expression).
    """
    collector = _NameCollector()
    collector.visit(node)
    names = list(collector.names)
    elements = [ast.Name(id=name, ctx=ast.Load()) for name in names]
    return ast.Tuple(elts=elements, ctx=ast.Load()), _create_ast_lambda(names, node)
class _ConditionalTransformer(ast.NodeTransformer):
    """Rewrites a @conditional function's if/elif/else chain into a call to
    _numpy_conditional(), so the function can operate element-wise on numpy
    arrays instead of taking a single scalar execution branch."""

    def __init__(self, f):
        self._func = f

    def transform(self):
        """Parse self._func's source, rewrite it, and return the recompiled function."""
        tree = ast.parse(inspect.getsource(self._func))
        self._expect(tree, 'function not a Module', type(tree), ast.Module)
        self._expect(tree, 'Module body too large', len(tree.body), 1)
        self._expect(tree, 'FunctionDef not found', type(tree.body[0]), ast.FunctionDef)
        func_def = tree.body[0]
        func_name = func_def.name
        self._expect(func_def, 'FunctionDef body too large', len(func_def.body), 1)
        self._expect(func_def, 'function must start with "if"', type(func_def.body[0]), ast.If)
        tree = self.visit(tree)
        tree = ast.fix_missing_locations(tree)
        code = compile(tree, '<conditional:%s>' % func_name, 'exec')
        data = {}
        # executing the compiled module defines the rewritten function in `data`
        eval(code, globals(), data)
        return data[func_name]

    def _expect(self, node, error, value, expected):
        # raise MalformedConditional unless value == expected
        if value != expected:
            raise MalformedConditional(self._func, node, "%s (%s != %s)" % (error, expected, value))

    def visit_FunctionDef(self, node):
        assert len(node.decorator_list) == 1  # we expect that we are the only decorator
        assert isinstance(node.decorator_list[0], ast.Name)
        assert node.decorator_list[0].id == 'conditional'
        # strip the decorator so the recompiled function is not transformed again
        return ast.FunctionDef(name=node.name, args=node.args,
            body=[self.visit(x) for x in node.body], decorator_list=[])

    def visit_If(self, node):
        """Collect the whole if/elif/else chain, then emit one _numpy_conditional() call."""
        blocks = []   # the Return statement of each branch
        tests = []    # each branch's condition (or _else_case_id)
        shapes = []   # a Name per condition, used to pick the result's shape
        while True:
            body = node.body
            self._expect(node, '"if" code body must contain exactly 1 element', len(body), 1)
            blocks.append(body[0])
            test = node.test
            tests.append(test)
            if type(test) == ast.Name:
                shapes.append(test)
            elif type(test) == ast.Compare:
                if isinstance(test.left, ast.Name):
                    shapes.append(test.left)
                # BUG FIX: ast.Compare has no `.right` attribute — the
                # right-hand operands live in `.comparators`. The original
                # `test.right` raised AttributeError whenever the left side
                # was not a Name.
                elif isinstance(test.comparators[0], ast.Name):
                    shapes.append(test.comparators[0])
                else:
                    # BUG FIX: the original constructed this exception
                    # without raising it, silently accepting malformed
                    # comparisons (and later crashing on shapes[0]).
                    raise MalformedConditional(self._func, test, 'expected variable in comparison')
            else:
                self._expect(test, 'expected single comparison or name', type(test), ast.Compare)
            or_elses = node.orelse
            if not or_elses:
                break
            self._expect(node, '"else" code body must contain 1 element', len(or_elses), 1)
            or_else = or_elses[0]
            if isinstance(or_else, ast.If):
                node = or_else  # an "elif": keep walking the chain
            else:
                blocks.append(or_else)
                tests.append(_else_case_id)
                break
        for block in blocks:
            self._expect(block, '"if" blocks must exit with "return"', type(block), ast.Return)
        # now build a call to _numpy_conditional() using cond_args as arguments, that will handle
        # the runtime evaluation of the conditional.
        cond_args = [shapes[0]]
        for test, value in zip(tests, (block.value for block in blocks)):
            elements = []
            if test == _else_case_id:
                elements.append(ast.Str(s=test))
            else:
                elements.append(_create_ast_lambda([], test))
            elements.extend(_expression_lambda(value))
            cond_args.append(ast.Tuple(elts=elements, ctx=ast.Load()))
        return ast.Return(value=ast.Call(
            func=ast.Name(id='_numpy_conditional', ctx=ast.Load()), keywords=[], args=cond_args))
def conditional(*args, **kwargs):
    """Decorator that recompiles a restricted if/elif/else function so its
    conditionals are evaluated element-wise over numpy arrays.

    Works both as @conditional and as @conditional(...); list/tuple
    arguments to the wrapped function are converted to numpy arrays.
    """
    if not (len(args) == 1 and callable(args[0])):
        # used with arguments: return a decorator
        return lambda f: conditional(f)
    f = args[0]
    if not inspect.isfunction(f):
        raise Exception('@conditional can only be applied to functions')
    f_transformed = _ConditionalTransformer(f).transform()
    def wrapper(*a):
        converted = [numpy.array(x) if isinstance(x, (list, tuple)) else x for x in a]
        return f_transformed(*converted)
    return wrapper
|
pqtoan/mathics
|
mathics/builtin/numpy_utils/with_numpy.py
|
Python
|
gpl-3.0
| 12,764
|
[
"VisIt"
] |
9879e104bda221c61f83fa42f84492d16a17769824310af6a93d0b889434aec7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.