| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import espressopp
from espressopp import Real3D
d = 0.85
Nchains = 10
Mmonomers = 10
N = Nchains * Mmonomers
L = pow(N/d, 1.0/3)
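# L is the cubic box edge implied by density d; note that the PolymerMelt call
# below uses a fixed (10, 10, 10) box rather than (L, L, L).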
system, integrator = espressopp.standard_system.PolymerMelt(Nchains, Mmonomers,(10,10,10), dt = 0.005, temperature=1.0)
print "starting warmup"
org_dt = integrator.dt
pot = system.getInteraction(0).getPotential(0,0)
print pot
print "Nint = ", system.getNumberOfInteractions()
final_sigma = pot.sigma
final_epsilon = pot.epsilon
print "sigma=",pot.sigma, "epsilon=",pot.epsilon
maxParticleID = int(espressopp.analysis.MaxPID(system).compute())
N = 1
number = 50
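# Warm-up: ramp the force cap and the LJ sigma/epsilon linearly toward their
# final values over `number` iterations; the first 10 iterations are skipped,
# so the ramp effectively starts at 20% of the target values.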
for k in range(number):
if k < 10:
continue
force_capping = espressopp.integrator.CapForce(system, 1000000.0/number*k)
integrator.addExtension(force_capping)
pot.sigma = final_sigma/number*k
pot.epsilon = final_epsilon/number*k
integrator.dt = 0.0001
espressopp.tools.analyse.info(system, integrator)
integrator.run(N)
espressopp.tools.analyse.info(system, integrator)
integrator.dt = org_dt
pot.sigma = final_sigma
pot.epsilon = final_epsilon
force_capping.disconnect()
for k in range(10):
integrator.run(70)
espressopp.tools.analyse.info(system, integrator)
integrator.step = 0
print "warmup finished"
for k in range(10):
integrator.run(100)
espressopp.tools.analyse.info(system, integrator)
| kkreis/espressopp | testsuite/pickle_potential/testwarmup.py | Python | gpl-3.0 | 2,149 | ["ESPResSo"] | f1ecf82cf86ea9717bec3ff6ec316da28096220e858729a70627f2921acd260d |
"""
Downloads SDSS DR8 photometric and spectroscopic information.
:note: SamPy.db.sdss may need editing as this is the file where
the server is defined, and thus defines whether this script
calls DR7 or 8.
:requires: SamPy
:author: Sami-Matias Niemi
:contact: sniemi@unc.edu
:version: 0.1
"""
import sqlite3
import SamPy.db.sdss as sdss
import SamPy.log.Logger as lg
import SamPy.db.sqlite as sq
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
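# Illustrative behaviour of the helper above (hypothetical input):
# chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]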
def getIds(column, database='catalogs.db'):
"""
Recover ids from RESOLVEmasterfile table.
:param column: name of the id column
:type column: string
:param database: name of the SQLite3 database file
:type database: string
:return: ids
:rtype: ndarray
"""
query = 'SELECT %s from RESOLVEmasterfile where %s > 0' % (column, column)
data = sq.get_data_sqliteSMNfunctions('./', database, query, toNumpy=False)
return data
def buildQuery(ids):
"""
Builds a query, executes it against the SDSS server, and appends the results to dr8data.txt.
:param ids: a list of ids to match
:type ids: list or ndarray
"""
idlist = 's.specobjid in ('
for id in ids:
idlist += str(id[0]) + ', '
idlist = idlist[:-2] + ')'
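# At this point idlist looks like, e.g. for ids [(123,), (456,)]:
# "s.specobjid in (123, 456)" (values illustrative)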
query = "SELECT s.specobjid, p.objid, \
p.petroMag_u, p.petroMag_g, p.petroMag_r, p.petroMag_i, p.petroMag_z, \
p.petroMagErr_u, p.petroMagErr_g, p.petroMagErr_r, p.petroMagErr_i, p.petroMagErr_z,\
p.psfMag_u, p.psfMag_g, p.psfMag_r, p.psfMag_i, p.psfMag_z, \
p.psfMagErr_u, p.psfMagErr_g, p.psfMagErr_r, p.psfMagErr_i, p.psfMagErr_z, \
p.petroR90_u, p.petroR90_g, p.petroR90_r, p.petroR90_i, p.petroR90_z, \
p.petroR90Err_u, p.petroR90Err_g, p.petroR90Err_r, p.petroR90Err_i, p.petroR90Err_z, \
p.petroR50_u, p.petroR50_g, p.petroR50_r, p.petroR50_i, p.petroR50_z, \
p.petroR50Err_u, p.petroR50Err_g, p.petroR50Err_r, p.petroR50Err_i, p.petroR50Err_z, \
s.h_alpha_flux, s.h_alpha_flux_err, s.h_alpha_eqw, s.h_alpha_eqw_err, \
s.h_beta_flux, s.h_beta_flux_err, s.h_beta_eqw, s.h_beta_eqw_err, \
s.oii_3726_flux, s.oii_3726_flux_err, s.oii_3726_eqw, s.oii_3726_eqw_err, \
s.neiii_3869_flux, s.neiii_3869_flux_err, s.neiii_3869_eqw, s.neiii_3869_eqw_err, \
s.oiii_4959_flux, s.oiii_4959_flux_err, s.oiii_4959_eqw, s.oiii_4959_eqw_err, \
s.oiii_5007_flux, s.oiii_5007_flux_err, s.oiii_5007_eqw, s.oiii_5007_eqw_err, \
s.nii_6548_flux, s.nii_6548_flux_err, s.nii_6548_eqw, s.nii_6548_eqw_err, \
s.nii_6584_flux, s.nii_6584_flux_err, s.nii_6584_eqw, s.nii_6584_eqw_err \
from Galaxy as p, galSpecLine as s \
WHERE p.specobjid = s.specobjid and {0:>s}".format(idlist)
lines = sdss.query(query).readlines()
fh = open('dr8data.txt', 'a')
for line in lines:
fh.write(line)
fh.close()
if __name__ == '__main__':
log_filename = 'SDSSqueryscriptDR8.log'
log = lg.setUpLogger(log_filename)
log.info('Starting script')
ids = getIds('dr8specobjid')
log.info('DR8 IDs recovered from the RESOLVE database')
#need to split
spl = chunks(ids, 300)
for id in spl:
buildQuery(id)
log.info('data recovered from the DR8 SDSS database')
| sniemi/SamPy | resolve/catalogs/getSDSSphotandSpecDR8.py | Python | bsd-2-clause | 3,330 | ["Galaxy"] | 1440c56606ccac37f428e787ad80984b3d1d7e516b40c13424996c827891ff09 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the CIFAR-10 network with additional variables to support pruning.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow.compat.v1 as tf
from model_pruning.examples.cifar10 import cifar10_input
from model_pruning.python import pruning
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # pylint: disable=line-too-long
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
BATCH_SIZE = 128
DATA_DIR = '/tmp/cifar10_data'
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight decay
is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = _variable_on_cpu(
name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
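# Sketch of the effect (illustrative, not part of the original comments): a
# variable created with wd=0.004 contributes an extra 0.004 * l2_loss(var)
# term to the 'losses' collection, which loss() later sums into total_loss.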
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(
data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(
eval_data=eval_data, data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.compat.v1.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.compat.v1.get_variable() with tf.Variable().
#
# While instantiating conv and local layers, we add mask and threshold
# variables to the layer by calling the pruning.apply_mask() function.
# Note that the masks are applied only to the weight tensors
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
images, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# norm1
norm1 = tf.nn.lrn(
pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
norm1, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(
conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(
norm2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay(
'weights', shape=[dim, 384], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(
tf.matmul(reshape, pruning.apply_mask(weights, scope)) + biases,
name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay(
'weights', shape=[384, 192], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(
tf.matmul(local3, pruning.apply_mask(weights, scope)) + biases,
name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay(
'weights', [192, NUM_CLASSES], stddev=1 / 192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(
tf.matmul(local4, pruning.apply_mask(weights, scope)),
biases,
name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
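# Illustrative shape trace, assuming the standard 24x24 distorted crops from
# cifar10_input and BATCH_SIZE=128: conv1/pool1 -> [128, 12, 12, 64],
# conv2/pool2 -> [128, 6, 6, 64], local3 -> [128, 384], local4 -> [128, 192],
# logits -> [128, NUM_CLASSES].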
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape
[batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(
INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = DATA_DIR
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| google-research/google-research | model_pruning/examples/cifar10/cifar10_pruning.py | Python | apache-2.0 | 13,794 | ["Gaussian"] | 054ee1aa0fdd0aae195746fc7a856138a172413db6d72c93f0a4d389122cf19f |
import util,collections,sys
import traceback
from copy import deepcopy,copy
from daytime import DayTimeRange
from termweek import TermWeek
import pparser
import tracker
# hours 1-7 -> 13-19
class PatternAtom(object):
dayname = ['M','Tu','W','Th','F','Sa','Su']
terms = ['Mi','Le','Ea']
def __init__(self,template):
self._daytimes = []
self._termweeks = TermWeek()
self._template = template # currently unused, but should be useful in reducing confusion in this class re omissions
def addTermWeek(self,term,week):
self._termweeks.addTermWeek(term,week)
def addDayTimeRange(self,dy,fh,fm,th,tm):
self._daytimes.append(DayTimeRange(dy,fh,fm,th,tm))
def addDayTimeRangeDirect(self,c):
self._daytimes.append(c)
def getDayTimesRaw(self):
return self._daytimes
def getTermWeeks(self):
return self._termweeks
def getTerms(self):
return self._termweeks.getTerms()
def restrictToTerm(self,t):
self._termweeks.restrictToTerm(t)
def copyRestrictedToTerm(self,term):
out = copy(self)
out._termweeks = self._termweeks.copyRestrictedToTerm(term)
return out
def restrictToDayTimeRange(self,dy,fh,fm,th,tm):
r = DayTimeRange(dy,fh,fm,th,tm)
out = []
for dt in self._daytimes:
if dt.intersect_test(r):
out.append(dt)
self._daytimes = out
def removeDayTimeRangeDirect(self,target):
hit = target in self._daytimes
self._daytimes = filter(lambda x: x != target,self._daytimes)
return hit
def addTermWeeksFrom(self,src):
self._termweeks.merge(src._termweeks)
def addDayTimesFrom(self,src):
self._daytimes.extend(deepcopy(src._daytimes))
def setAllYear(self):
self._termweeks.set_all()
def setAllInTerm(self,term):
self._termweeks.set_all_in_term(term)
def empty(self):
if len(self._daytimes) == 0:
return True
return not len(self._termweeks)
def firstDayTime(self):
return min(self._daytimes)
def firstTermWeek(self):
return self._termweeks.first()
def lastDayTime(self):
return max(self._daytimes)
def key(self):
(first_term,first_week) = self._termweeks.first()
day = set([x.day for x in self._daytimes])
first_day = min([(x+4)%7 for x in day])
m_day = (first_day+3)%7
min_time = set()
for d in [x for x in self._daytimes if x.day == m_day]:
min_time.add(d.startval())
if len(min_time):
min_time = min(min_time)
else:
min_time = 0
return "%1.1d%2.2d%1.1d%4.4d" % (first_term,first_week,first_day,min_time)
def merge(self,other):
# If dts are equivalent, can merge tws
if set(self._daytimes) == set(other._daytimes):
# Merge tws
out = deepcopy(self)
out._termweeks.merge(other._termweeks)
return out
# If tws are equivalent, can merge dts
if self._termweeks == other._termweeks:
out = deepcopy(self)
out._daytimes.extend(deepcopy(other._daytimes))
return out
return None
def _after(self,(term,week),cur):
x = util.successor(self._daytimes,cur)
if x is None:
(term,week) = self._termweeks.successor(term,week)
if term is None and week is None:
return None
return ((term,week),self.firstDayTime())
else:
return ((term,week),x)
def plus(self,parent,offset):
(term,week) = self._termweeks.last()
pos = ((term,week),self.lastDayTime())
for i in range(0,offset):
pos = apply(parent._after,pos)
if pos is None:
return None
((term,week),next_dt) = pos
out = PatternAtom(False)
out.addTermWeek(term,week)
out.addDayTimeRangeDirect(next_dt)
return out
def first(self):
(term,week) = self.firstTermWeek()
out = PatternAtom(False)
out.addTermWeek(term,week)
out.addDayTimeRangeDirect(self.firstDayTime())
return out
def __repr__(self):
return self.format_atom()
def _format_termweeks(self,reduction):
if reduction is not None and self._termweeks == reduction._termweeks:
return ""
out = []
if not self._termweeks.all_weeks_of_year_test():
for term in self._termweeks.each_term():
weeks = self._termweeks.weeks_of_term(term)
if self._termweeks.all_weeks_of_term_test(term):
s = ''
else:
s = util.number_range_text(weeks)
out.append("%s%s" % (self.terms[term],s))
return " ".join(out)
def _format_daytimes(self,reduction):
if reduction is not None and set(reduction._daytimes) == set(self._daytimes):
return ""
# collate by time
daysbytime = collections.defaultdict(set)
for dt in self._daytimes:
(day,time) = dt.rep2()
daysbytime[time].add(day)
# sort by time. For now we only care about stability, should also care about sensibleness XXX
keys = sorted(daysbytime.keys())
# emit
out = []
for time in keys:
days = daysbytime[time]
out.append(util.hide_commas(util.number_range_text(days,self.dayname)))
out.append(time)
return " ".join(out)
# XXX force verbosity on overriding
def format_atom(self,reduction = None):
if not reduction:
reduction = pparser.parseone("MiLeEa")
out = []
ts = self._format_termweeks(reduction)
if ts:
out.append(ts)
# days and times
dts = self._format_daytimes(reduction)
if dts:
out.append(dts)
return " ".join(out)
def count(self):
return len(self._termweeks) * len(self._daytimes)
def blast(self):
out = []
for (term,week) in self._termweeks.each():
for dt in self._daytimes:
p = PatternAtom(False)
p.addTermWeek(term,week)
p.addDayTimeRangeDirect(dt)
out.append(p)
return sorted(out,key = lambda x: x.key())
def expand_back_to(self,datetime):
for term in range(0,3):
first_week = datetime._termweeks.first_week_of_term(term)
if first_week is None:
continue
self._termweeks.expand_back_to_week(term,first_week)
def expand_forward_to(self,datetime):
for term in range(0,3):
last_week = datetime._termweeks.last_week_of_term(term)
if last_week is None:
continue
self._termweeks.expand_forward_to_week(term,last_week)
def __eq__(self,other):
if self._template != other._template:
return False
if self._termweeks != other._termweeks:
return False
if set(self._daytimes) != set(other._daytimes):
return False
return True
def __ne__(self,other):
return not self.__eq__(other)
| ieb/timetables | python/lib/patternatom.py | Python | agpl-3.0 | 7,447 | ["BLAST"] | 61629ce7e7bcf450d88eb3fdea092fe088e8a36804dcd96f688a2b437eebd2ad |
"""
Actions manager for transcripts ajax calls.
+++++++++++++++++++++++++++++++++++++++++++
The module does not support rollback (pressing the "Cancel" button in Studio).
All user changes are saved immediately.
"""
import copy
import json
import logging
import os
import requests
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from six import text_type
from student.auth import has_course_author_access
from util.json_request import JsonResponse
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.video_module.transcripts_utils import (
copy_or_rename_transcript,
download_youtube_subs,
GetTranscriptsFromYouTubeException,
get_video_transcript_content,
generate_subs_from_source,
get_transcripts_from_youtube,
manage_video_subtitles_save,
remove_subs_from_store,
Transcript,
TranscriptsRequestValidationException,
youtube_video_transcript_name,
)
from xmodule.video_module.transcripts_model_utils import (
is_val_transcript_feature_enabled_for_course
)
__all__ = [
'upload_transcripts',
'download_transcripts',
'check_transcripts',
'choose_transcripts',
'replace_transcripts',
'rename_transcripts',
'save_transcripts',
]
log = logging.getLogger(__name__)
def error_response(response, message, status_code=400):
"""
Simplify similar actions: log message and return JsonResponse with message included in response.
By default return 400 (Bad Request) Response.
"""
log.debug(message)
response['status'] = message
return JsonResponse(response, status_code)
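# Typical use (illustrative): error_response(response, 'Bad data') logs
# 'Bad data' and returns a 400 JsonResponse whose 'status' is that message.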
@login_required
def upload_transcripts(request):
"""
Upload transcripts for current module.
returns: response dict::
status: 'Success' and HTTP 200 or 'Error' and HTTP 400.
subs: Value of uploaded and saved html5 sub field in video item.
"""
response = {
'status': 'Unknown server error',
'subs': '',
}
locator = request.POST.get('locator')
if not locator:
return error_response(response, 'POST data without "locator" form data.')
try:
item = _get_item(request, request.POST)
except (InvalidKeyError, ItemNotFoundError):
return error_response(response, "Can't find item by locator.")
if 'transcript-file' not in request.FILES:
return error_response(response, 'POST data without "file" form data.')
video_list = request.POST.get('video_list')
if not video_list:
return error_response(response, 'POST data without video names.')
try:
video_list = json.loads(video_list)
except ValueError:
return error_response(response, 'Invalid video_list JSON.')
# Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
source_subs_filedata = request.FILES['transcript-file'].read().decode('utf-8-sig')
source_subs_filename = request.FILES['transcript-file'].name
if '.' not in source_subs_filename:
return error_response(response, "Undefined file extension.")
basename = os.path.basename(source_subs_filename)
source_subs_name = os.path.splitext(basename)[0]
source_subs_ext = os.path.splitext(basename)[1][1:]
if item.category != 'video':
return error_response(response, 'Transcripts are supported only for "video" modules.')
# Allow upload only if at least one video link is present
if video_list:
sub_attr = source_subs_name
try:
# Generate and save for 1.0 speed, will create subs_sub_attr.srt.sjson subtitles file in storage.
generate_subs_from_source({1: sub_attr}, source_subs_ext, source_subs_filedata, item)
for video_dict in video_list:
video_name = video_dict['video']
# Create a transcript for every video source, in case some of the sources are deleted later.
# Updates item.sub with `video_name` on success.
copy_or_rename_transcript(video_name, sub_attr, item, user=request.user)
response['subs'] = item.sub
response['status'] = 'Success'
except Exception as ex:
return error_response(response, text_type(ex))
else:
return error_response(response, 'Empty video sources.')
return JsonResponse(response)
@login_required
def download_transcripts(request):
"""
Returns the requested transcripts file to the user.
Raises Http404 if unsuccessful.
"""
locator = request.GET.get('locator')
subs_id = request.GET.get('subs_id')
if not locator:
log.debug('GET data without "locator" property.')
raise Http404
try:
item = _get_item(request, request.GET)
except (InvalidKeyError, ItemNotFoundError):
log.debug("Can't find item by locator.")
raise Http404
if item.category != 'video':
log.debug('Transcripts are supported only for "video" modules.')
raise Http404
try:
if not subs_id:
raise NotFoundError
filename = subs_id
content_location = StaticContent.compute_location(
item.location.course_key,
'subs_{filename}.srt.sjson'.format(filename=filename),
)
input_format = Transcript.SJSON
transcript_content = contentstore().find(content_location).data
except NotFoundError:
# Try searching in VAL for the transcript as a last resort
transcript = None
if is_val_transcript_feature_enabled_for_course(item.location.course_key):
transcript = get_video_transcript_content(edx_video_id=item.edx_video_id, language_code=u'en')
if not transcript:
raise Http404
name_and_extension = os.path.splitext(transcript['file_name'])
filename, input_format = name_and_extension[0], name_and_extension[1][1:]
transcript_content = transcript['content']
# convert sjson content into srt format.
transcript_content = Transcript.convert(transcript_content, input_format=input_format, output_format=Transcript.SRT)
if not transcript_content:
raise Http404
# Construct an HTTP response
response = HttpResponse(transcript_content, content_type='application/x-subrip; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="{filename}.srt"'.format(filename=filename)
return response
@login_required
def check_transcripts(request):
"""
Check state of transcripts availability.
request.GET['data'] has key `videos`, which can contain any of the following::
[
{u'type': u'youtube', u'video': u'OEoXaMPEzfM', u'mode': u'youtube'},
{u'type': u'html5', u'video': u'video1', u'mode': u'mp4'},
{u'type': u'html5', u'video': u'video2', u'mode': u'webm'}
]
`type` is youtube or html5
`video` is html5 or youtube video_id
`mode` is youtube, mp4 or webm
Returns transcripts_presence dict::
html5_local: list of html5 ids, if subtitles exist locally for them;
is_youtube_mode: bool, if we have youtube_id, and as youtube mode is of higher priority, reflect this with flag;
youtube_local: bool, if youtube transcripts exist locally;
youtube_server: bool, if youtube transcripts exist on server;
youtube_diff: bool, if youtube transcripts exist on youtube server, and are different from local youtube ones;
current_item_subs: string, value of item.sub field;
status: string, 'Error' or 'Success';
subs: string, new value of item.sub field, that should be set in module;
command: string, tells the front-end what to do and what to show to the user.
"""
transcripts_presence = {
'html5_local': [],
'html5_equal': False,
'is_youtube_mode': False,
'youtube_local': False,
'youtube_server': False,
'youtube_diff': True,
'current_item_subs': None,
'status': 'Error',
}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(transcripts_presence, text_type(e))
transcripts_presence['status'] = 'Success'
filename = 'subs_{0}.srt.sjson'.format(item.sub)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
local_transcripts = contentstore().find(content_location).data
transcripts_presence['current_item_subs'] = item.sub
except NotFoundError:
pass
# Check for youtube transcripts presence
youtube_id = videos.get('youtube', None)
if youtube_id:
transcripts_presence['is_youtube_mode'] = True
# youtube local
filename = 'subs_{0}.srt.sjson'.format(youtube_id)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
local_transcripts = contentstore().find(content_location).data
transcripts_presence['youtube_local'] = True
except NotFoundError:
log.debug("Can't find transcripts in storage for youtube id: %s", youtube_id)
# youtube server
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = youtube_id
youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
if youtube_transcript_name:
youtube_text_api['params']['name'] = youtube_transcript_name
youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
if youtube_response.status_code == 200 and youtube_response.text:
transcripts_presence['youtube_server'] = True
#check youtube local and server transcripts for equality
if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:
try:
youtube_server_subs = get_transcripts_from_youtube(
youtube_id,
settings,
item.runtime.service(item, "i18n")
)
if json.loads(local_transcripts) == youtube_server_subs: # check transcripts for equality
transcripts_presence['youtube_diff'] = False
except GetTranscriptsFromYouTubeException:
pass
# Check for html5 local transcripts presence
html5_subs = []
for html5_id in videos['html5']:
filename = 'subs_{0}.srt.sjson'.format(html5_id)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
html5_subs.append(contentstore().find(content_location).data)
transcripts_presence['html5_local'].append(html5_id)
except NotFoundError:
log.debug("Can't find transcripts in storage for non-youtube video_id: %s", html5_id)
if len(html5_subs) == 2: # check html5 transcripts for equality
transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])
command, subs_to_use = _transcripts_logic(transcripts_presence, videos)
if command == 'not_found':
# Try searching in VAL for the transcript as a last resort
if is_val_transcript_feature_enabled_for_course(item.location.course_key):
video_transcript = get_video_transcript_content(edx_video_id=item.edx_video_id, language_code=u'en')
command = 'found' if video_transcript else command
transcripts_presence.update({
'command': command,
'subs': subs_to_use,
})
return JsonResponse(transcripts_presence)
def _transcripts_logic(transcripts_presence, videos):
"""
Given the contents of `transcripts_presence`, figure out what to show to the user:
returns: `command` and `subs`.
`command`: string, tells the front-end what to do and what to show to the user.
`subs`: string, new value of item.sub field, that should be set in module.
`command` is one of::
replace: replace local youtube subtitles with the server ones
found: subtitles are found
import: import subtitles from youtube server
choose: choose one from two html5 subtitles
not_found: subtitles are not found
"""
command = None
# new value of item.sub field, that should be set in module.
subs = ''
# youtube transcripts take priority over html5 ones by design
if (
transcripts_presence['youtube_diff'] and
transcripts_presence['youtube_local'] and
transcripts_presence['youtube_server']): # youtube server and local exist
command = 'replace'
subs = videos['youtube']
elif transcripts_presence['youtube_local']: # only youtube local exist
command = 'found'
subs = videos['youtube']
elif transcripts_presence['youtube_server']: # only youtube server exist
command = 'import'
else: # html5 part
if transcripts_presence['html5_local']: # can be 1 or 2 html5 videos
if len(transcripts_presence['html5_local']) == 1 or transcripts_presence['html5_equal']:
command = 'found'
subs = transcripts_presence['html5_local'][0]
else:
command = 'choose'
subs = transcripts_presence['html5_local'][0]
else: # html5 sources have no subtitles
# check if item sub has subtitles
if transcripts_presence['current_item_subs'] and not transcripts_presence['is_youtube_mode']:
log.debug("Command is use existing %s subs", transcripts_presence['current_item_subs'])
command = 'use_existing'
else:
command = 'not_found'
log.debug(
"Resulted command: %s, current transcripts: %s, youtube mode: %s",
command,
transcripts_presence['current_item_subs'],
transcripts_presence['is_youtube_mode']
)
return command, subs
@login_required
def choose_transcripts(request):
"""
Replaces html5 subtitles, presented for both html5 sources, with chosen one.
Code removes rejected html5 subtitles and updates sub attribute with chosen html5_id.
It does nothing with youtube id's.
Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
"""
response = {
'status': 'Error',
'subs': '',
}
try:
data, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, text_type(e))
html5_id = data.get('html5_id') # html5_id chosen by user
# find rejected html5_id and remove appropriate subs from store
html5_id_to_remove = [x for x in videos['html5'] if x != html5_id]
if html5_id_to_remove:
remove_subs_from_store(html5_id_to_remove, item)
if item.sub != html5_id: # update sub value
item.sub = html5_id
item.save_with_metadata(request.user)
response = {
'status': 'Success',
'subs': item.sub,
}
return JsonResponse(response)
@login_required
def replace_transcripts(request):
"""
Replaces all transcripts with youtube ones.
Downloads subtitles from youtube and replaces all transcripts with downloaded ones.
Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
"""
response = {'status': 'Error', 'subs': ''}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, text_type(e))
youtube_id = videos['youtube']
if not youtube_id:
return error_response(response, 'YouTube id {} is not present in the request data.'.format(youtube_id))
try:
download_youtube_subs(youtube_id, item, settings)
except GetTranscriptsFromYouTubeException as e:
return error_response(response, text_type(e))
item.sub = youtube_id
item.save_with_metadata(request.user)
response = {
'status': 'Success',
'subs': item.sub,
}
return JsonResponse(response)
def _validate_transcripts_data(request):
"""
Validates that the request contains all proper data for transcripts processing.
Returns tuple of 3 elements::
data: dict, loaded json from request,
videos: parsed `data` to useful format,
item: video item from storage
Raises `TranscriptsRequestValidationException` if validation is unsuccessful
or `PermissionDenied` if user has no access.
"""
data = json.loads(request.GET.get('data', '{}'))
if not data:
raise TranscriptsRequestValidationException(_('Incoming video data is empty.'))
try:
item = _get_item(request, data)
except (InvalidKeyError, ItemNotFoundError):
raise TranscriptsRequestValidationException(_("Can't find item by locator."))
if item.category != 'video':
raise TranscriptsRequestValidationException(_('Transcripts are supported only for "video" modules.'))
# parse data from request.GET['data']['videos'] to a useful format
videos = {'youtube': '', 'html5': {}}
for video_data in data.get('videos'):
if video_data['type'] == 'youtube':
videos['youtube'] = video_data['video']
else: # do not add same html5 videos
if videos['html5'].get('video') != video_data['video']:
videos['html5'][video_data['video']] = video_data['mode']
return data, videos, item
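# Illustrative result (ids taken from the check_transcripts docstring example):
# videos == {'youtube': 'OEoXaMPEzfM', 'html5': {'video1': 'mp4', 'video2': 'webm'}}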
@login_required
def rename_transcripts(request):
"""
Create copies of existing subtitles with new names of HTML5 sources.
Old subtitles are not deleted now, because we do not have rollback functionality.
If successful, item.sub will be chosen randomly from the html5 video sources provided by the front-end.
"""
response = {'status': 'Error', 'subs': ''}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, text_type(e))
old_name = item.sub
for new_name in videos['html5'].keys(): # copy subtitles for every HTML5 source
try:
# updates item.sub with new_name if it is successful.
copy_or_rename_transcript(new_name, old_name, item, user=request.user)
except NotFoundError:
# The subtitles file `item.sub` is not present in the system. Nothing to copy or rename.
error_response(response, "Can't find transcripts in storage for {}".format(old_name))
response['status'] = 'Success'
response['subs'] = item.sub # item.sub has been changed, it is not equal to old_name.
log.debug("Updated item.sub to %s", item.sub)
return JsonResponse(response)
@login_required
def save_transcripts(request):
"""
Saves video module with updated values of fields.
Returns: status `Success` or status `Error` and HTTP 400.
"""
response = {'status': 'Error'}
data = json.loads(request.GET.get('data', '{}'))
if not data:
return error_response(response, 'Incoming video data is empty.')
try:
item = _get_item(request, data)
except (InvalidKeyError, ItemNotFoundError):
return error_response(response, "Can't find item by locator.")
metadata = data.get('metadata')
if metadata is not None:
new_sub = metadata.get('sub')
for metadata_key, value in metadata.items():
setattr(item, metadata_key, value)
item.save_with_metadata(request.user) # item becomes updated with new values
if new_sub:
manage_video_subtitles_save(item, request.user)
else:
# If `new_sub` is empty, it means that user explicitly does not want to use
# transcripts for current video ids and we remove all transcripts from storage.
current_subs = data.get('current_subs')
if current_subs is not None:
for sub in current_subs:
remove_subs_from_store(sub, item)
response['status'] = 'Success'
return JsonResponse(response)
def _get_item(request, data):
"""
Obtains from 'data' the locator for an item.
Next, gets that item from the modulestore (allowing any errors to raise up).
Finally, verifies that the user has access to the item.
Returns the item.
"""
usage_key = UsageKey.from_string(data.get('locator'))
# This is placed before has_course_author_access() to validate the location,
# because has_course_author_access() raises an error if the location is invalid.
item = modulestore().get_item(usage_key)
# use the item's course_key, because the usage_key might not have the run
if not has_course_author_access(request.user, item.location.course_key):
raise PermissionDenied()
return item
| procangroup/edx-platform | cms/djangoapps/contentstore/views/transcripts_ajax.py | Python | agpl-3.0 | 21,457 | ["FEFF"] | 1ebba7ad1ea55d45c44ae98f3c8b6e0f4e7d6d904367e766ccd05e9833bfd327 |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Batch
Batches
=======
Batches allow you to optimize the number of gl calls using pyglets batch
"""
__docformat__ = 'restructuredtext'
import cocosnode
from batch import *
import pyglet
from pyglet.graphics import OrderedGroup
from pyglet import image
from pyglet.gl import *
__all__ = ['BatchNode','BatchableNode']
def ensure_batcheable(node):
if not isinstance(node, BatchableNode):
raise Exception("Children node of a batch must be have the batch mixin")
for c in node.get_children():
ensure_batcheable(c)
class BatchNode( cocosnode.CocosNode ):
def __init__(self):
super(BatchNode, self).__init__()
self.batch = pyglet.graphics.Batch()
self.groups = {}
def add(self, child, z=0, name=None):
ensure_batcheable(child)
child.set_batch(self.batch, self.groups, z)
super(BatchNode, self).add(child, z, name)
def visit(self):
""" All children are placed in to self.batch, so nothing to visit """
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchNode, self).remove(child)
def draw(self):
pass # All drawing done in visit!
class BatchableNode( cocosnode.CocosNode ):
def add(self, child, z=0, name=None):
batchnode = self.get_ancestor(BatchNode)
if not batchnode:
# this node was added, but there's no batchnode in the
# hierarchy, so we proceed as normal
super(BatchableNode, self).add(child, z, name)
return
ensure_batcheable(child)
super(BatchableNode, self).add(child, z, name)
child.set_batch(self.batch, batchnode.groups, z)
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchableNode, self).remove(child)
def set_batch(self, batch, groups=None, z=0):
self.batch = batch
if batch is None:
self.group = None
else:
group = groups.get(z)
if group is None:
group = pyglet.graphics.Group()
groups[z] = group
self.group = group
for childZ, child in self.children:
child.set_batch(self.batch, groups, z + childZ)
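# Minimal usage sketch (assumes a cocos2d scene is running and that the child
# is a BatchableNode subclass; the names below are illustrative, not from this
# module):
#
#   batch = BatchNode()
#   batch.add(child, z=1)
#   layer.add(batch)  # all children now render through one Batch.draw() call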
| eevee/cocos2d-mirror | cocos/batch.py | Python | bsd-3-clause | 4,335 | ["VisIt"] | 80f3901b80fcf4aed399d4ad42394b217506e90f4f0d36f104ae88b9e22c758e |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-list-users
# Author : Adrian Casajus
########################################################################
"""
Lists the users in the Configuration. If no group is specified, all users are returned.
Example:
$ dirac-admin-list-users
All users registered:
vhamar
msapunov
atsareg
"""
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
Script.registerSwitch("e", "extended", "Show extended info")
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(["Group: Only users from this group (default: all)"], default=["all"], mandatory=False)
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs(group=True)
import DIRAC
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
extendedInfo = False
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("e", "extended"):
extendedInfo = True
def printUsersInGroup(group=False):
result = diracAdmin.csListUsers(group)
if result["OK"]:
if group:
print("Users in group %s:" % group)
else:
print("All users registered:")
for username in result["Value"]:
print(" %s" % username)
def describeUsersInGroup(group=False):
result = diracAdmin.csListUsers(group)
if result["OK"]:
if group:
print("Users in group %s:" % group)
else:
print("All users registered:")
result = diracAdmin.csDescribeUsers(result["Value"])
print(diracAdmin.pPrint.pformat(result["Value"]))
for group in args:
if "all" in args:
group = False
if not extendedInfo:
printUsersInGroup(group)
else:
describeUsersInGroup(group)
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
| ic-hep/DIRAC | src/DIRAC/Interfaces/scripts/dirac_admin_list_users.py | Python | gpl-3.0 | 2,214 | ["DIRAC"] | 7972466f8c59256aa38843a3693d258079bc751b830c5ee8b65e1b6dcec3d856 |
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from lazy import lazy
from xmodule import course_metadata_utils
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.exceptions import UndefinedContext
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList
from xmodule.mixin import LicenseMixin
import json
from xblock.core import XBlock
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
from django.utils.timezone import UTC
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
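# Illustrative behaviour (hypothetical values): StringOrDate().from_json("2014-01-01")
# yields a parsed date, while StringOrDate().from_json("Spring term") falls back to
# returning the string unchanged.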
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s", toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(
help=_("List of pairs of (title, url) for textbooks used in this course"),
default=[],
scope=Scope.content
)
wiki_slug = String(help=_("Slug that points to the wiki for this course"), scope=Scope.content)
enrollment_start = Date(help=_("Date that enrollment for this class is opened"), scope=Scope.settings)
enrollment_end = Date(help=_("Date that enrollment for this class is closed"), scope=Scope.settings)
start = Date(
help=_("Start time when this module is visible"),
default=DEFAULT_START_DATE,
scope=Scope.settings
)
end = Date(help=_("Date that this class ends"), scope=Scope.settings)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help=_("Grading policy definition for this class"),
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because someone would not edit this value within Studio.
)
show_chat = Boolean(
display_name=_("Show Chat Widget"),
help=_("Enter true or false. When true, students can see the chat widget in the course."),
default=False,
scope=Scope.settings
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Inside the provided '
'brackets, enter an additional set of square brackets surrounding each pair of dates you add. '
'Format each pair of dates as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, '
'format each pair as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"]. Be sure to include the "T" between '
'the date and time. For example, an entry defining two blackout periods looks like this, including '
'the outer pair of square brackets: [["2015-09-15", "2015-09-21"], ["2015-10-01", "2015-10-08"]] '
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2015_T1"}. The "id" '
'value for each category must be unique. In "id" values, the only special characters that are '
'supported are underscore, hyphen, and period.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
facebook_url = String(
help=_(
"Enter the URL for the official course Facebook group. "
"If you provide a URL, the mobile app includes a button that students can tap to access the group."
),
default=None,
display_name=_("Facebook URL"),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
enable_ccx = Boolean(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
display_name=_("Enable CCX"),
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
help=_(
"Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses on edX."
" When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
),
default=False,
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
checklists = List(
scope=Scope.settings,
default=[
{
"short_description": _("Getting Started With Studio"),
"items": [
{
"short_description": _("Add Course Team Members"),
"long_description": _(
"Grant your collaborators permission to edit your course so you can work together."
),
"is_checked": False,
"action_url": "ManageUsers",
"action_text": _("Edit Course Team"),
"action_external": False,
},
{
"short_description": _("Set Important Dates for Your Course"),
"long_description": _(
"Establish your course's student enrollment and launch dates on the Schedule and Details "
"page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Details & Schedule"),
"action_external": False,
},
{
"short_description": _("Draft Your Course's Grading Policy"),
"long_description": _(
"Set up your assignment types and grading policy even if you haven't created all your "
"assignments."
),
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": _("Edit Grading Settings"),
"action_external": False,
},
{
"short_description": _("Explore the Other Studio Checklists"),
"long_description": _(
"Discover other available course authoring tools, and find help when you need it."
),
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False,
},
],
},
{
"short_description": _("Draft a Rough Course Outline"),
"items": [
{
"short_description": _("Create Your First Section and Subsection"),
"long_description": _("Use your course outline to build your first Section and Subsection."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Set Section Release Dates"),
"long_description": _(
"Specify the release dates for each Section in your course. Sections become visible to "
"students on their release dates."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Designate a Subsection as Graded"),
"long_description": _(
"Set a Subsection to be graded as a specific assignment type. Assignments within graded "
"Subsections count toward a student's final grade."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Reordering Course Content"),
"long_description": _("Use drag and drop to reorder the content in your course."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Renaming Sections"),
"long_description": _("Rename Sections by clicking the Section name from the Course Outline."),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Deleting Course Content"),
"long_description": _(
"Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is "
"no Undo function."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
{
"short_description": _("Add an Instructor-Only Section to Your Outline"),
"long_description": _(
"Some course authors find using a section for unsorted, in-progress work useful. To do "
"this, create a section and set the release date to the distant future."
),
"is_checked": False,
"action_url": "CourseOutline",
"action_text": _("Edit Course Outline"),
"action_external": False,
},
],
},
{
"short_description": _("Explore edX's Support Tools"),
"items": [
{
"short_description": _("Explore the Studio Help Forum"),
"long_description": _(
"Access the Studio Help forum from the menu that appears when you click your user name "
"in the top right corner of Studio."
),
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": _("Visit Studio Help"),
"action_external": True,
},
{
"short_description": _("Enroll in edX 101"),
"long_description": _("Register for edX 101, edX's primer for course creation."),
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": _("Register for edX 101"),
"action_external": True,
},
{
"short_description": _("Download the Studio Documentation"),
"long_description": _("Download the searchable Studio reference documentation in PDF form."),
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": _("Download Documentation"),
"action_external": True,
},
],
},
{
"short_description": _("Draft Your Course About Page"),
"items": [
{
"short_description": _("Draft a Course Description"),
"long_description": _(
"Courses on edX have an About page that includes a course video, description, and more. "
"Draft the text students will read before deciding to enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Staff Bios"),
"long_description": _(
"Showing prospective students who their instructor will be is helpful. "
"Include staff bios on the course About page."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course FAQs"),
"long_description": _("Include a short list of frequently asked questions about your course."),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
{
"short_description": _("Add Course Prerequisites"),
"long_description": _(
"Let students know what knowledge and/or skills they should have before "
"they enroll in your course."
),
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": _("Edit Course Schedule & Details"),
"action_external": False,
},
],
},
],
)
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format for due dates. The default is Mon DD, YYYY. Enter \"%m-%d-%Y\" for MM-DD-YYYY, "
"\"%d-%m-%Y\" for DD-MM-YYYY, \"%Y-%m-%d\" for YYYY-MM-DD, or \"%Y-%d-%m\" for YYYY-DD-MM."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Enter end, early_with_info, or early_no_info. After certificate generation, students who passed see a "
"link to their certificates on the dashboard and students who did not pass see information about the "
"grading configuration. The default is end, which displays this certificate information to all students "
"after the course end date. To display this certificate information to all students as soon as "
"certificates are generated, enter early_with_info. To display only the links to passing students as "
"soon as certificates are generated, enter early_no_info."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
issue_badges = Boolean(
display_name=_("Issue Open Badges"),
help=_(
"Issue Open Badges badges for this course. Badges are generated when certificates are created."
),
scope=Scope.settings,
default=True
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
cert_html_view_enabled = Boolean(
display_name=_("Certificate Web/HTML View Enabled"),
help=_("If true, certificate Web/HTML views are enabled for the course."),
scope=Scope.settings,
default=False,
)
cert_html_view_overrides = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Web/HTML View Overrides"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific overrides for the Web/HTML template parameters here (JSON format)"),
scope=Scope.settings,
)
# Specific certificate information managed via Studio (should eventually fold other cert settings into this)
certificates = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Configuration"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific configuration information here (JSON format)"),
scope=Scope.settings,
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
social_sharing_url = String(
display_name=_("Social Media Sharing URL"),
help=_(
"If dashboard social sharing and custom course URLs are enabled, you can provide a URL "
"(such as the URL to a course About page) that social media sites can link to. URLs must "
"be fully qualified. For example: http://www.edx.org/course/Introduction-to-MOOCs-ITM001"
),
default=None,
scope=Scope.settings,
)
language = String(
display_name=_("Course Language"),
help=_("Specify the language of your course."),
default=None,
scope=Scope.settings
)
teams_configuration = Dict(
display_name=_("Teams Configuration"),
help=_(
"Enter configuration for the teams feature. Expects two entries: max_team_size and topics, where "
"topics is a list of topics."
),
scope=Scope.settings,
deprecated=True, # Deprecated until the teams feature is made generally available
)
enable_proctored_exams = Boolean(
display_name=_("Enable Proctored Exams"),
help=_(
"Enter true or false. If this value is true, timed and proctored exams are enabled in your course."
),
default=False,
scope=Scope.settings
)
minimum_grade_credit = Float(
display_name=_("Minimum Grade for Credit"),
help=_(
"The minimum grade that a learner must earn to receive credit in the course, "
"as a decimal between 0.0 and 1.0. For example, for 75%, enter 0.75."
),
default=0.8,
scope=Scope.settings,
)
class CourseModule(CourseFields, SequenceModule): # pylint: disable=abstract-method
"""
The CourseDescriptor needs its module_class to be a SequenceModule, but some code that
expects a CourseDescriptor to have all its fields can fail if it gets a SequenceModule instead.
This class is to make sure that all the fields are present in all cases.
"""
class CourseDescriptor(CourseFields, SequenceDescriptor, LicenseMixin):
"""
The descriptor for the course XModule
"""
module_class = CourseModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
delattr(self, 'show_timezone')
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
        # Repair policies that were persisted with defective (but not None)
        # grading policy values: missing keys fall back to the class defaults.
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
# load license if it exists
definition = LicenseMixin.parse_license_from_xml(definition, xml_object)
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
        if len(self.textbooks) > 0:
            # Serialize one <textbook> element per textbook so that
            # definition_from_xml can round-trip the full list.
            for textbook in self.textbooks:
                textbook_xml_object = etree.Element('textbook')
                textbook_xml_object.set('title', textbook.title)
                textbook_xml_object.set('book_url', textbook.book_url)
                xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
# handle license specifically. Default the course to have a license
# of "All Rights Reserved", if a license is not explicitly set.
self.add_license_to_xml(xml_object, default="all-rights-reserved")
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
return course_metadata_utils.has_course_ended(self.end)
def may_certify(self):
"""
Return whether it is acceptable to show the student a certificate download link.
"""
return course_metadata_utils.may_certify_for_course(
self.certificates_display_behavior,
self.certificates_show_before_end,
self.has_ended()
)
def has_started(self):
return course_metadata_utils.has_course_started(self.start)
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if not self.is_cohorted:
return False
        return bool(self.cohort_config.get("auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
        This allows changing the default cohorting behavior of inline
        discussions. By setting this to False, all inline discussions are
        non-cohorted unless their ids are specified in cohorted_discussions.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
        there is no flag, returns a heuristic value based on the
        announcement and the start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
                # The course has not started yet (or started within the last day)
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
        Returns a score that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course if available.
        The lower the number, the "newer" the course.
        """
        # Make courses that have an announcement date have a lower
        # score than courses that don't; older courses should have a
        # higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
announcement = self.announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
            affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
# If this descriptor has been bound to a student, return the corresponding
# XModule. If not, just use the descriptor itself
try:
module = getattr(self, '_xmodule', None)
if not module:
module = self
except UndefinedContext:
module = self
def possibly_scored(usage_key):
"""Can this XBlock type can have a score or children?"""
return usage_key.block_type in self.block_types_affecting_grading
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children(usage_key_filter=possibly_scored):
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for chapter in self.get_children():
for section in chapter.get_children():
if section.graded:
xmoduledescriptors = list(yield_descriptor_descendents(section))
xmoduledescriptors.append(section)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': section,
'xmoduledescriptors': [child for child in xmoduledescriptors if child.has_score]
}
section_format = section.format if section.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(section)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@lazy
def block_types_affecting_grading(self):
"""Return all block types that could impact grading (i.e. scored, or having children)."""
return frozenset(
cat for (cat, xblock_class) in XBlock.load_classes() if (
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
)
)
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the desired text corresponding the course's start date and time in UTC. Prefers .advertised_start,
then falls back to .start
"""
i18n = self.runtime.service(self, "i18n")
return course_metadata_utils.course_start_datetime_text(
self.start,
self.advertised_start,
format_string,
i18n.ugettext,
i18n.strftime
)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return course_metadata_utils.course_start_date_is_default(
self.start,
self.advertised_start
)
def end_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the end date or date_time for the course formatted as a string.
"""
return course_metadata_utils.course_end_datetime_text(
self.end,
format_string,
self.runtime.service(self, "i18n").strftime
)
def get_discussion_blackout_datetimes(self):
"""
Get a list of dicts with start and end fields with datetime values from
the discussion_blackouts setting
"""
date_proxy = Date()
try:
ret = [
{"start": date_proxy.from_json(start), "end": date_proxy.from_json(end)}
for start, end
in filter(None, self.discussion_blackouts)
]
for blackout in ret:
if not blackout["start"] or not blackout["end"]:
raise ValueError
return ret
except (TypeError, ValueError):
log.exception(
"Error parsing discussion_blackouts %s for course %s",
self.discussion_blackouts,
self.id
)
return []
@property
def forum_posts_allowed(self):
"""
Return whether forum posts are allowed by the discussion_blackouts
setting
"""
blackouts = self.get_discussion_blackout_datetimes()
now = datetime.now(UTC())
for blackout in blackouts:
if blackout["start"] <= now <= blackout["end"]:
return False
return True
@property
def number(self):
"""
Returns this course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
"""
return course_metadata_utils.number_for_course_location(self.location)
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
def clean_id(self, padding_char='='):
"""
Returns a unique deterministic base32-encoded ID for the course.
The optional padding_char parameter allows you to override the "=" character used for padding.
"""
return course_metadata_utils.clean_course_key(self.location.course_key, padding_char)
@property
def teams_enabled(self):
"""
Returns whether or not teams has been enabled for this course.
Currently, teams are considered enabled when at least one topic has been configured for the course.
"""
if self.teams_configuration:
return len(self.teams_configuration.get('topics', [])) > 0
return False
@property
def teams_max_size(self):
"""
Returns the max size for teams if teams has been configured, else None.
"""
return self.teams_configuration.get('max_team_size', None)
@property
def teams_topics(self):
"""
Returns the topics that have been configured for teams for this course, else None.
"""
return self.teams_configuration.get('topics', None)
def get_user_partitions_for_scheme(self, scheme):
"""
Retrieve all user partitions defined in the course for a particular
partition scheme.
Arguments:
scheme (object): The user partition scheme.
Returns:
list of `UserPartition`
"""
return [
p for p in self.user_partitions
if p.scheme == scheme
]
def set_user_partitions_for_scheme(self, partitions, scheme):
"""
Set the user partitions for a particular scheme.
Preserves partitions associated with other schemes.
Arguments:
scheme (object): The user partition scheme.
Returns:
list of `UserPartition`
"""
other_partitions = [
p for p in self.user_partitions # pylint: disable=access-member-before-definition
if p.scheme != scheme
]
self.user_partitions = other_partitions + partitions # pylint: disable=attribute-defined-outside-init
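# Hedged standalone sketch (not part of edx-platform): the merge semantics of
# CourseDescriptor.set_grading_policy above, shown with plain dicts. Missing
# keys in the stored policy fall back to the class defaults, then the
# course-supplied policy overrides the result key by key. All names below are
# illustrative.
if __name__ == '__main__':
    defaults = {'GRADER': ['default grader'], 'GRADE_CUTOFFS': {'Pass': 0.5}}
    stored = {'GRADER': ['custom grader']}            # persisted, possibly partial
    course_policy = {'GRADE_CUTOFFS': {'Pass': 0.7}}  # explicit course override
    merged = dict(stored)
    for key in ('GRADER', 'GRADE_CUTOFFS'):
        merged.setdefault(key, defaults[key])
    merged.update(course_policy)
    assert merged == {'GRADER': ['custom grader'], 'GRADE_CUTOFFS': {'Pass': 0.7}}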
|
ahmadio/edx-platform
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 63,344
|
[
"VisIt"
] |
9fd74216caf30cb3789b7044f15b04c05c5f5c593008dcf07c428d78eeb2428e
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""SpineML Bundle Module
This modual will form a convience class to bundle together related SpineML
objects into a single standard object which can be easily passed between
programs. The bundle will be able to interact with premade spineML objects
through the other support classes, or parse directly from XML
TODO:
## export all as a loop through
## export each element, as a pass through
## import a project file
"""
import os
import pdb
import tempfile
import smlExperiment # SpineML layer classes
import smlNetwork
import smlComponent
class Bundle(object):
"""Bundle instances are a container class for the various spineML specifications.
Each specification is stored a list of objects.
"""
    def __init__(self, experiments=None, networks=None, components=None, project_dict=None):
self.experiments = []
self.components = []
self.networks = []
self.index = {}
        if experiments is not None:
if type(experiments) is smlExperiment.SpineMLType:
self.experiments.append(experiments)
elif type(experiments) is list:
for e in experiments:
if type(e) is not smlExperiment.SpineMLType:
raise TypeError('Invalid Experiment Input: %s' % str(type(e)))
else:
self.experiments.append(e)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiments)))
        if networks is not None:
if type(networks) is smlNetwork.SpineMLType:
self.networks.append(networks)
elif type(networks) is list:
for n in networks:
if type(n) is not smlNetwork.SpineMLType:
raise TypeError('Invalid Network Input: %s' % str(type(n)))
else:
self.networks.append(n)
else:
raise TypeError('Invalid Network Input: %s' % str(type(networks)))
        if components is not None:
if type(components) is smlComponent.SpineMLType:
self.components.append(components)
elif type(components) is list:
for c in components:
if type(c) is not smlComponent.SpineMLType:
raise TypeError('Invalid Component Input: %s' % str(type(c)))
else:
self.components.append(c)
else:
raise TypeError('Invalid Component Input: %s' % str(type(components)))
        if project_dict is not None:
assert 'experiment' in project_dict
assert 'network' in project_dict
assert 'components' in project_dict
# set experiment
            # eg: 'experiment': ('experiment0.xml', '<xml content>')
experiment_file, experiment_xml = project_dict['experiment']
with tempfile.NamedTemporaryFile() as temp:
temp.write(experiment_xml)
temp.flush()
temp.seek(0)
exp_obj = smlExperiment.parse(temp,True)
self.experiments.append(exp_obj)
# build up the experiment index
self.index[experiment_file] = {}
self.index[experiment_file]['experiment'] = {experiment_file:exp_obj}
# set network
# eg: 'network':('model.xml','<xml content>')
network_file, network_xml = project_dict['network']
with tempfile.NamedTemporaryFile() as temp:
temp.write(network_xml)
temp.flush()
temp.seek(0)
net_obj = smlNetwork.parse(temp,True)
self.networks.append(net_obj)
self.index[experiment_file]['network'] = {}
self.index[experiment_file]['network'][network_file] = net_obj
            # set components (create the index dict once, before the loop,
            # so that every component is recorded rather than only the last)
            self.index[experiment_file]['component'] = {}
            for component_file, component_xml in project_dict['components']:
                with tempfile.NamedTemporaryFile() as temp:
                    temp.write(component_xml)
                    temp.flush()
                    temp.seek(0)
                    comp_obj = smlComponent.parse(temp, True)
                    self.components.append(comp_obj)
                    self.index[experiment_file]['component'][component_file] = comp_obj
    def add_experiment(self, experiment, recursive=False):
        """Add a SpineML Experiment, stored as a SpineMLType, to the bundle.
        Setting recursive=True will enable the experiment to add further subcomponents
        which it accesses, such as the network file and the component file.
        Adding an experiment using the recursive option also builds an index, which
        groups the related network and component files under their experiment.
"""
if type(experiment) is smlExperiment.SpineMLType:
self.experiments.append(experiment)
elif type(experiment) is str:
exp_obj = smlExperiment.parse(experiment,True)
self.experiments.append(exp_obj)
exp_file = os.path.basename(experiment)
# build up the experiment index
self.index[exp_file] = {}
self.index[exp_file]['experiment'] = {exp_file:exp_obj}
if recursive:
# Add the linked model files if recursive is set to true.
path = os.path.dirname(experiment) + '/'
if path == '/':
path = ''
for e in exp_obj.Experiment:
self.add_network(path+e.Model.network_layer_url,True,exp_file)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiment)))
    def add_network(self, network, recursive=False, index=None):
"""Add a SpineML Network stored as a SpineMLType, to the bundle
When building an index recursively, pass the experiment file name as the index
"""
if type(network) is smlNetwork.SpineMLType:
self.networks.append(network)
elif type(network) is str:
net_file = os.path.basename(network)
path = os.path.dirname(network) + '/'
if path == '/':
path = ''
net_obj = smlNetwork.parse(network,True)
self.networks.append(net_obj)
            if recursive:
                if index is not None:
                    self.index[index]['network'] = {net_file: net_obj}
                    self.index[index]['component'] = {}
                # Add the linked component files if recursive is set to true
                for n in net_obj.Population:
                    self.add_component(smlComponent.parse(path + n.Neuron.url, True))
                    if index is not None:
                        # record each component instead of resetting the dict
                        self.index[index]['component'][n.Neuron.url] = self.components[-1]
else:
raise TypeError('Invalid Network Input %s' % str(type(network)))
def add_component(self, component):
"""Add a SpineML Component of SpineMLType type to the bundle
"""
if type(component) is smlComponent.SpineMLType:
self.components.append(component)
elif type(component) is str:
self.components.append(smlComponent.parse(component,True))
else:
raise TypeError('Invalid Component Input %s' % str(type(component)))
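# Hedged usage sketch (not part of the library; file names are hypothetical
# and must exist on disk, with the experiment referencing its network file
# via Model.network_layer_url). Recursive loading fills the flat lists and
# builds the per-experiment index.
if __name__ == '__main__':
    bundle = Bundle()
    bundle.add_experiment('experiment0.xml', recursive=True)
    print bundle.index['experiment0.xml'].keys()  # ['experiment', 'network', 'component']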
|
AdamRTomkins/libSpineML
|
libSpineML/smlBundle.py
|
Python
|
gpl-3.0
| 7,712
|
[
"NEURON"
] |
def63f6ee556d10c370c83a284807f0907a7a21be402286d275a1028e053ee7a
|
#
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Harmonic potentials for bonds and triplets."""
import numpy as np
from ase import Atoms
from ..calculator import NiceManybody
class ZeroPair(NiceManybody.G):
"""Defines a non-interacting pair potential."""
def __call__(self, r, xi, *args):
"""Return triplet energy only."""
return xi
def gradient(self, r, xi, *args):
"""Return triplet interaction only."""
return [np.zeros_like(xi), np.ones_like(xi)]
def hessian(self, r, xi, *args):
"""Zero hessian."""
return [np.zeros_like(r)] * 3
class ZeroTriplet(NiceManybody.G):
"""Defines a non-interacting triplet potential."""
def __call__(self, *args):
"""Zero triplet energy."""
return np.zeros(args[0].shape[0])
def gradient(self, *args):
"""Zero triplet force."""
return np.zeros([2] + list(args[0].shape))
def hessian(self, *args):
"""Zero triplet hessian."""
return np.zeros([3] + list(args[0].shape) + [args[0].shape[1]])
class HarmonicBond(NiceManybody.F):
"""Defines a harmonic bond."""
def __init__(self, r0, k):
"""Initialize with equilibrium distance and stiffness."""
self.r0 = r0
self.k = k
def __call__(self, r, xi, atype, ptype):
r"""Compute spring potential energy.
.. math:: E(r) = \frac{1}{2} k(r - r_0)^2 + \xi
"""
e = 0.5 * self.k * (r - self.r0)**2
e[ptype < 0] = 0 # ignore bonds from angles
return e + xi
def gradient(self, r, xi, atype, ptype):
"""Compute spring force."""
g = self.k * (r - self.r0)
g[ptype < 0] = 0
return [g, np.ones_like(xi)]
def hessian(self, r, xi, atype, ptype):
"""Compute spring stiffness."""
h = np.full_like(r, self.k)
h[ptype < 0] = 0
return [h, np.zeros_like(r), np.zeros_like(r)]
class HarmonicAngle(NiceManybody.G):
"""Defines a harmonic angle potential."""
def __init__(self, a0, k, atoms: Atoms):
"""Initialize with equilibrium angle and stiffness.
        Note: atoms are needed because minimum-image conventions are applied to triplet
distances. This will be removed once G is redefined to take triplet
distances instead of vectors.
"""
self.a0 = a0
self.k = k
self.atoms = atoms
def __call__(self, r_ij_c, r_ik_c, *args):
r"""Angle harmonic energy.
Define the following functional form for :math:`G`:
.. math::
E(a) & = \frac{1}{2} k(a - a_0)^2 \\
\vec{u} & = \vec{r_{ij}} \\
\vec{v} & = \vec{r_{ik}} \\
\vec{w}(\vec{u}, \vec{v}) & = \vec{r_{jk}} = \vec{v} - \vec{u} \\
f(u, v, w) & = -\frac{u^2 + w^2 - v^2}{2uw} \\
F(\vec{u}, \vec{v}) & = \frac{\vec{u}\cdot\vec{w}(\vec{u}, \vec{v})}{uw} \\
& = f(u, v, |\vec{w}(\vec{u}, \vec{v})|) \\
h(x) & = E(\arccos(x)) \\
            G(\vec{u}, \vec{v}) & = h(F(\vec{u}, \vec{v}))
"""
_, (r_ij, r_ik, r_jk) = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
a = np.arccos(-(r_ij**2 + r_jk**2 - r_ik**2) / (2 * r_ij * r_jk))
return 0.5 * self.k * (a - self.a0)**2
def gradient(self, r_ij_c, r_ik_c, *args):
r"""Compute derivatives of :math:`G` w/r to :math:`r_{ij}` and :math:`r_{ik}`.
We have the following partial derivatives:
.. math::
\frac{\partial G}{\partial u_i}(\vec{u}, \vec{v}) & = h'(F(\vec{u}, \vec{v})) \frac{\partial F}{\partial u_i}(\vec{u}, \vec{v}) \\
\frac{\partial G}{\partial v_i}(\vec{u}, \vec{v}) & = h'(F(\vec{u}, \vec{v})) \frac{\partial F}{\partial v_i}(\vec{u}, \vec{v}) \\
The partial derivatives of :math:`F` are expressed as:
.. math::
\frac{\partial F}{\partial u_i} = U_i & = \frac{\partial f}{\partial u}\frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial w}\frac{\partial w}{\partial u_i}\\
\frac{\partial F}{\partial v_i} = V_i & = \frac{\partial f}{\partial v}\frac{\partial v}{\partial v_i} + \frac{\partial f}{\partial w}\frac{\partial w}{\partial v_i}
We note the normal vectors as:
.. math::
\bar{u}_i & = \frac{u_i}{u}\\
\bar{v}_i & = \frac{v_i}{v}\\
\bar{w}_i & = \frac{w_i}{w}
So that we can write the following partial derivatives:
.. math::
\frac{\partial u}{\partial u_i} & = \bar{u}_i\\
\frac{\partial v}{\partial v_i} & = \bar{v}_i\\
\frac{\partial w}{\partial u_i} & = -\bar{w}_i\\
\frac{\partial w}{\partial v_i} & = \bar{w}_i
Which gives the final expressions for :math:`U_i` and :math:`V_i`:
.. math::
U_i &= \frac{\partial f}{\partial u} \bar{u}_i + \frac{\partial f}{\partial w} (-\bar{w}_i)\\
V_i &= \frac{\partial f}{\partial v} \bar{v}_i + \frac{\partial f}{\partial w} \bar{w}_i
        The remaining scalar partial derivatives are simple to derive and left
        to the reader.
"""
D, d = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
# Broadcast slices
_c = np.s_[:, np.newaxis]
# Mapping: u <- r_ij, v <- r_ik, w <- r_jk = |r_ik_c - r_ij_c|
u, v, w = d
# Normal vectors
nu, nv, nw = (D[i] / d[i][_c] for i in range(3))
# cos of angle
f = -(u**2 + w**2 - v**2) / (2 * u * w)
# derivatives with respect to triangle lengths
df_u = -(u**2 - w**2 + v**2) / (2 * u**2 * w)
df_w = -(w**2 - u**2 + v**2) / (2 * w**2 * u)
df_v = v / (u * w)
# Scalar derivatives
def E_(a):
return self.k * (a - self.a0) # noqa
def h_(f):
with np.errstate(divide="raise"):
d_arccos = -1 / np.sqrt(1 - f**2)
return E_(np.arccos(f)) * d_arccos
# Derivatives with respect to vectors rij and rik
dG = np.zeros([2] + list(r_ij_c.shape))
# dG_rij
dG[0] = df_u[_c] * nu + df_w[_c] * (-nw)
# dG_rik
dG[1] = df_v[_c] * nv + df_w[_c] * (+nw)
dG *= h_(f)[_c]
return dG
def hessian(self, r_ij_c, r_ik_c, *args):
r"""Compute derivatives of :math:`G` w/r to :math:`r_{ij}` and :math:`r_{ik}`.
We have the following partial derivatives:
.. math::
\frac{\partial^2 G}{\partial u_i\partial u_j}(\vec{u}, \vec{v}) & = h''(F) U_i U_j + h'(F)\frac{\partial U_i}{\partial u_j}\\
\frac{\partial^2 G}{\partial v_i\partial v_j}(\vec{u}, \vec{v}) & = h''(F) V_i V_j + h'(F)\frac{\partial V_i}{\partial v_j}\\
\frac{\partial^2 G}{\partial u_i\partial v_j}(\vec{u}, \vec{v}) & = h''(F) U_i V_j + h'(F)\frac{\partial U_i}{\partial v_j}
The derivatives of :math:`U_i` and :math:`V_i` need careful treatment:
.. math::
\frac{\partial U_i}{\partial u_j} = \frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial u}\frac{\partial^2 u}{\partial u_i\partial u_j} + \frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial u_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial u_i\partial u_j}\\
\frac{\partial V_i}{\partial v_j} = \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial v}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial v}{\partial v_i} + \frac{\partial f}{\partial v}\frac{\partial^2 v}{\partial v_i\partial v_j} + \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial v_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial v_i\partial v_j}\\
\frac{\partial U_i}{\partial v_j} = \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial u}\frac{\partial^2 u}{\partial u_i\partial v_j} + \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial u_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial u_i\partial v_j}
For the simple partial derivatives in the above section, we have:
.. math::
\frac{\partial^2 u}{\partial u_i\partial u_j} & = \bar{\bar{u}}_{ij} = \frac{\delta_{ij} - \bar{u}_i \bar{u}_j}{u}\\
\frac{\partial^2 v}{\partial v_i\partial v_j} & = \bar{\bar{u}}_{ij} = \frac{\delta_{ij} - \bar{v}_i \bar{v}_j}{v}\\
\frac{\partial^2 u}{\partial u_i\partial v_j} & = 0\\
\frac{\partial^2 w}{\partial u_i\partial u_j} & = \bar{\bar{w}}_{ij} = \frac{\delta_{ij} - \bar{w}_i \bar{w}_j}{w}\\
\frac{\partial^2 w}{\partial v_i\partial v_j} & = \bar{\bar{w}}_{ij}\\
\frac{\partial^2 w}{\partial u_i\partial v_j} & = -\bar{\bar{w}}_{ij}
For the more complex partial derivatives:
.. math::
\frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial u^2} \frac{\partial u}{\partial u_j} + \frac{\partial^2 f}{\partial u\partial w}\frac{\partial w}{\partial u_j}\\
\frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial w\partial u} \frac{\partial u}{\partial u_j} + \frac{\partial^2 f}{\partial w^2}\frac{\partial w}{\partial u_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial v}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial v^2} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial v\partial w}\frac{\partial w}{\partial v_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial w\partial v} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial w^2}\frac{\partial w}{\partial v_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial u\partial v} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial u\partial w}\frac{\partial w}{\partial v_j}\\
The remaining scalar derivatives are left to the reader.
"""
D, d = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
# Utilities
_c = np.s_[:, np.newaxis]
_cc = np.s_[:, np.newaxis, np.newaxis]
_o = lambda u, v: np.einsum('...i,...j', u, v, optimize=True) # noqa
# Scalar functions
dE = lambda a: self.k * (a - self.a0) # Force
ddE = lambda a: self.k # Stiffness
arccos = np.arccos
darccos = lambda x: -1 / np.sqrt(1 - x**2)
ddarccos = lambda x: -x / (1 - x**2)**(3/2)
dh = lambda f: dE(arccos(f)) * darccos(f)
ddh = lambda f: (
ddE(arccos(f)) * darccos(f) * darccos(f)
+ dE(arccos(f)) * ddarccos(f)
)
# Mapping: u <- r_ij, v <- r_ik, w <- r_jk = |r_ik_c - r_ij_c|
u, v, w = d
# Normal vectors
nu, nv, nw = (D[i] / d[i][_c] for i in range(3))
# Outer products
nunu, nvnv, nwnw = (_o(n, n) for n in (nu, nv, nw))
# Normal tensors
Id = np.eye(3)[np.newaxis, :]
nnu, nnv, nnw = ((Id - o) / d[i][_cc]
for i, o in enumerate((nunu, nvnv, nwnw)))
# cos of angle
f = -(u**2 + w**2 - v**2) / (2 * u * w)
# derivatives with respect to triangle lengths
df_u = -(u**2 - w**2 + v**2) / (2 * u**2 * w)
df_w = -(w**2 - u**2 + v**2) / (2 * w**2 * u)
df_v = v / (u * w)
# second derivatives
ddf_uu = (v**2 - w**2) / (u**3 * w)
ddf_ww = (v**2 - u**2) / (w**3 * u)
ddf_vv = 1 / (u * w)
ddf_uv = -v / (u**2 * w)
ddf_uw = (u**2 + w**2 + v**2) / (2 * u**2 * w**2)
ddf_vw = -v / (w**2 * u)
        # Compound derivatives w/r to vectors
U = df_u[_c] * nu + df_w[_c] * (-nw)
V = df_v[_c] * nv + df_w[_c] * (+nw)
# Second derivatives w/r to vectors
dU_u = (
_o(nu, ddf_uu[_c] * nu + ddf_uw[_c] * (-nw))
+ df_u[_cc] * nnu
+ _o(-nw, ddf_uw[_c] * nu + ddf_ww[_c] * (-nw))
+ df_w[_cc] * nnw
)
dV_v = (
_o(nv, ddf_vv[_c] * nv + ddf_vw[_c] * nw)
+ df_v[_cc] * nnv
+ _o(nw, ddf_vw[_c] * nv + ddf_ww[_c] * nw)
+ df_w[_cc] * nnw
)
dU_v = (
_o(nu, ddf_uv[_c] * nv + ddf_uw[_c] * nw)
+ _o(-nw, ddf_vw[_c] * nv + ddf_ww[_c] * nw)
+ df_w[_cc] * (-nnw)
)
# Scalar parts
dh = dh(f)
ddh = ddh(f)
# Defining full derivatives
ddG = np.zeros([3, r_ij_c.shape[0], r_ij_c.shape[1], r_ij_c.shape[1]])
ddG[0] = ddh[_cc] * _o(U, U) + dh[_cc] * dU_u
ddG[1] = ddh[_cc] * _o(V, V) + dh[_cc] * dV_v
ddG[2] = ddh[_cc] * _o(U, V) + dh[_cc] * dU_v
return ddG
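# --- Illustrative check, not part of the original file ---
# A minimal standalone sketch cross-checking the analytic first derivatives
# of f(u, v, w) = -(u**2 + w**2 - v**2)/(2*u*w) used above against central
# finite differences; the function name is hypothetical.
def _check_scalar_derivatives(u=1.1, v=0.9, w=1.3, h=1e-6):
    import numpy as np
    f = lambda u, v, w: -(u**2 + w**2 - v**2) / (2 * u * w)
    # analytic first derivatives, copied from the expressions above
    df_u = -(u**2 - w**2 + v**2) / (2 * u**2 * w)
    df_v = v / (u * w)
    df_w = -(w**2 - u**2 + v**2) / (2 * w**2 * u)
    # central finite differences
    num_u = (f(u + h, v, w) - f(u - h, v, w)) / (2 * h)
    num_v = (f(u, v + h, w) - f(u, v - h, w)) / (2 * h)
    num_w = (f(u, v, w + h) - f(u, v, w - h)) / (2 * h)
    assert np.allclose([df_u, df_v, df_w], [num_u, num_v, num_w], atol=1e-6)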
|
libAtoms/matscipy
|
matscipy/calculators/manybody/explicit_forms/harmonic.py
|
Python
|
lgpl-2.1
| 14,227
|
[
"ASE",
"Matscipy"
] |
730b4ff371fbc738573af222b00f4b97e5a37e7a06f610a2031684d9be070a0a
|
import numpy as np
from matplotlib import pyplot
import rft1d
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y)
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return F
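# --- Hedged cross-check, not part of the original script ---
# For a single node (0D data) the matrix-based F statistic above should
# agree with scipy.stats.f_oneway, assuming scipy is available:
# >>> from scipy import stats
# >>> y = np.random.randn(23, 1)
# >>> X, X0, Xi, X0i = here_design_matrices((6, 8, 9), 3)
# >>> F = here_anova1(y, X, X0, Xi, X0i, (2, 20))
# >>> F0 = stats.f_oneway(y[:6].ravel(), y[6:14].ravel(), y[14:].ravel())[0]
# >>> np.allclose(F, F0)   # expected: True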
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 6,8,9 #number of responses in each group
nNodes = 101
FWHM = 12.0
nIterations = 5000
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
F = []
generator = rft1d.random.Generator1D(nTotal, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
f = here_anova1(y, X, X0, Xi, X0i, df)
F.append( f.max() )
F = np.asarray(F)
#(2) Survival functions:
heights = np.linspace(6, 14, 21)
sf = np.array( [ (F>h).mean() for h in heights] )
sfE = rft1d.f.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.f.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel(r'$P (F_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title('ANOVA validation (1D)', size=20)
pyplot.show()
|
0todd0000/rft1d
|
rft1d/examples/val_max_4_anova1_1d.py
|
Python
|
gpl-3.0
| 2,110
|
[
"Gaussian"
] |
8b24e7bfa2ca7466e7ab335a7cf487150d18754dfdf80a693af35823d2154ce9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010--2014 Nico Schlömer
#
# This file is part of matplotlib2tikz.
#
# matplotlib2tikz is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# matplotlib2tikz is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# matplotlib2tikz. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as pp
def basic_sin():
from mpltools import style
style.use('ggplot')
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
s2 = np.cos(2*np.pi*t)
pp.plot(t, s, 'o-', lw=4.1)
pp.plot(t, s2, 'o-', lw=4.1)
pp.xlabel('time(s)')
#pp.xlabel('time(s) _ % $ \\')
pp.ylabel('Voltage (mV)')
pp.title('Easier than easy $\\frac{1}{2}$')
pp.grid(True)
    return r'Simple $\sin$ plot with some labels'
def subplots():
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return np.multiply(s1, e1)
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
t3 = np.arange(0.0, 2.0, 0.01)
pp.subplot(211)
pp.plot(t1, f(t1), 'bo', t2, f(t2), 'k--', markerfacecolor='green')
pp.grid(True)
pp.title('A tale of 2 subplots')
pp.ylabel('Damped oscillation')
pp.subplot(212)
pp.plot(t3, np.cos(2*np.pi*t3), 'r.')
pp.grid(True)
pp.xlabel('time (s)')
pp.ylabel('Undamped')
return 'Two subplots on top of each other'
def image_plot():
from matplotlib import rcParams
try:
import Image
except ImportError:
raise SystemExit('PIL must be installed to run this example')
lena = Image.open('lena.png')
dpi = rcParams['figure.dpi']
figsize = lena.size[0]/dpi, lena.size[1]/dpi
pp.figure(figsize=figsize)
ax = pp.axes([0, 0, 1, 1], frameon=False)
ax.set_axis_off()
pp.imshow(lena, origin='lower')
# Set the current color map to HSV.
pp.hsv()
pp.colorbar()
return 'An \\texttt{imshow} plot'
def noise():
from numpy.random import randn
# Make plot with vertical (default) colorbar
fig = pp.figure()
ax = fig.add_subplot(111)
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest')
ax.set_title('Gaussian noise with vertical colorbar')
# Add colorbar, make sure to specify tick locations
# to match desired ticklabels.
cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
# vertically oriented colorbar
cbar.ax.set_yticklabels(['< -1', '0', '> 1'])
# Make plot with horizontal colorbar
fig = pp.figure()
ax = fig.add_subplot(111)
data = np.clip(np.random.randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest')
ax.set_title('Gaussian noise with horizontal colorbar')
cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation='horizontal')
# horizontal colorbar
cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])
return 'Noise with a color bar'
def circle_patch():
from matplotlib.patches import Circle
fig = pp.figure()
ax = fig.add_subplot(111)
ax.add_patch(Circle((0, 0), 1))
return 'A circle patch'
def patches():
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
fig = pp.figure()
ax = fig.add_subplot(111)
N = 3
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1*np.random.rand(N)
patches = []
for x1, y1, r in zip(x, y, radii):
circle = Circle((x1, y1), r)
patches.append(circle)
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1*np.random.rand(N)
theta1 = 360.0*np.random.rand(N)
theta2 = 360.0*np.random.rand(N)
for x1, y1, r, t1, t2 in zip(x, y, radii, theta1, theta2):
wedge = Wedge((x1, y1), r, t1, t2)
patches.append(wedge)
# Some limiting conditions on Wedge
patches += [
Wedge((0.3, 0.7), .1, 0, 360), # Full circle
Wedge((0.7, 0.8), .2, 0, 360, width=0.05), # Full ring
Wedge((0.8, 0.3), .2, 0, 45), # Full sector
Wedge((0.8, 0.3), .2, 45, 90, width=0.10), # Ring sector
]
for i in range(N):
        polygon = Polygon(np.random.rand(N, 2), closed=True)
patches.append(polygon)
colors = 100*np.random.rand(len(patches))
p = PatchCollection(patches,
cmap=mpl.cm.jet,
alpha=0.4
)
p.set_array(np.array(colors))
ax.add_collection(p)
pp.colorbar(p)
return 'Some patches and a color bar'
def legends():
x = np.ma.arange(0, 2*np.pi, 0.02)
y = np.ma.sin(x)
y1 = np.sin(2*x)
y2 = np.sin(3*x)
ym1 = np.ma.masked_where(y1 > 0.5, y1)
ym2 = np.ma.masked_where(y2 < -0.5, y2)
lines = pp.plot(x, y, 'r', x, ym1, 'g', x, ym2, 'bo')
pp.setp(lines[0], linewidth=4)
pp.setp(lines[1], linewidth=2)
pp.setp(lines[2], markersize=10)
pp.legend(('No mask', 'Masked if > 0.5', 'Masked if < -0.5'),
loc='upper right'
)
pp.title('Masked line demo')
return 'Plot with legends'
def annotate():
fig = pp.figure(1, figsize=(8, 5))
ax = fig.add_subplot(111,
autoscale_on=False,
xlim=(-1, 5),
ylim=(-4, 3)
)
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, color='blue')
ax.annotate('text',
xy=(4., 1.),
xycoords = 'data',
xytext = (4.5, 1.5),
textcoords='data',
arrowprops=dict(arrowstyle='->', ec='r')
)
ax.annotate('arrowstyle',
xy=(0, 1),
xycoords='data',
xytext=(-50, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle='->')
)
return 'Annotations'
def legends2():
t1 = np.arange(0.0, 2.0, 0.1)
t2 = np.arange(0.0, 2.0, 0.01)
# note that plot returns a list of lines. The 'l1, = plot' usage
    # extracts the first element of the list into l1 using tuple
# unpacking. So l1 is a Line2D instance, not a sequence of lines
l1, = pp.plot(t2, np.exp(-t2))
l2, l3 = pp.plot(t2, np.sin(2*np.pi*t2), '--go', t1, np.log(1+t1), '.')
l4, = pp.plot(t2, np.exp(-t2)*np.sin(2*np.pi*t2), 'rs-.')
    pp.legend((l2, l4), ('oscillatory', 'damped'), loc='upper right', shadow=True)
pp.xlabel('time')
pp.ylabel('volts')
pp.title('Damped oscillation')
return 'Another legend plot'
def logplot():
a = [pow(10, i) for i in range(10)]
fig = pp.figure()
ax = fig.add_subplot(1, 1, 1)
line, = ax.semilogy(a, color='blue', lw=2)
return 'Log scaled plot'
def loglogplot():
x = np.logspace(0, 6, num=5)
pp.loglog(x, x**2)
return 'Loglog plot with large ticks dimensions'
def text_overlay():
xxx = np.linspace(0, 5)
yyy = xxx**2
pp.text(1, 5, 'test1', size=50, rotation=30.,
ha='center', va='bottom', color='r', style='italic',
bbox=dict(boxstyle='round, pad=0.2',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
ls='dashdot'
)
)
pp.text(3, 6, 'test2', size=50, rotation=-30.,
ha='center', va='center', color='b', weight='bold',
bbox=dict(boxstyle='square',
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
pp.plot(xxx, yyy, label='graph')
pp.legend()
return 'Regular plot with overlay text'
def subplot4x4():
an = np.linspace(0, 2*np.pi, 100)
pp.subplot(221)
pp.plot(3*np.cos(an), 3*np.sin(an))
pp.title('not equal, looks like ellipse', fontsize=10)
pp.subplot(222)
pp.plot(3*np.cos(an), 3*np.sin(an))
pp.axis('equal')
pp.title('equal, looks like circle', fontsize=10)
pp.subplot(223)
pp.plot(3*np.cos(an), 3*np.sin(an))
pp.axis('equal')
pp.axis([-3, 3, -3, 3])
pp.title('looks like circle, even after changing limits', fontsize=10)
pp.subplot(224)
pp.plot(3*np.cos(an), 3*np.sin(an))
pp.axis('equal')
pp.axis([-3, 3, -3, 3])
pp.plot([0, 4], [0, 4])
pp.title('still equal after adding line', fontsize=10)
return '$4\\times 4$ subplots'
def histogram():
import matplotlib.pyplot as plt
# Make plot with vertical (default) colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
return 'Histogram'
def contourf_with_logscale():
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
#from matplotlib import colors, ticker
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
z = bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0) \
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
# Put in some negative values (lower left corner) to cause trouble with
# logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate a warning.
# Comment it out to see the warning.
z = np.ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the log locator tells
# contourf to use a log scale:
plt.contourf(X, Y, z,
locator=tkr.LogLocator()
)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = plt.contourf(X, Y, z, levs, norm=colors.LogNorm())
#The 'extend' kwarg does not work yet with a log scale.
plt.colorbar()
return 'contourf with logscale'
if __name__ == '__main__':
basic_sin()
pp.show()
|
0u812/matplotlib2tikz
|
test/testfunctions.py
|
Python
|
lgpl-3.0
| 10,698
|
[
"Gaussian"
] |
81ccf1adc3d15d609caf52cb98ab5bae60caac8e9316f0a75c65f6f9a24df497
|
'''<h1> Figure of Merit (FOM)</h1>
The Figure of Merit (FOM) is the function that compares how well the simulation matches the measured data. Strictly speaking, for Gaussian errors, a chi squared (χ<sup>2</sup>) FOM is the most appropriate. However, the world is not perfect and many times the data can be fitted more easily and more robustly if another FOM is chosen. Each FOM function has its merits and drawbacks, and fitting can rely critically on choosing the right FOM function for the particular data to be analyzed. The following gives a brief summary and explanation of the FOMs included in the standard GenX distribution so far.<br>
It is also possible to create custom FOM functions to be used by GenX. For more information on this refer to the Section "Customization" below.<br>
<h2>Available FOM functions</h2>
In the following, the merged data set consisting of all data sets
that are marked for use is denoted as <var>Y</var> and the corresponding
simulation is denoted as <var>S</var>. A single element of these arrays
is indicated by a subscript <var>i</var>. In the same manner, the
independent variable (denoted as <var>x</var> in the data structure) is called
<var>X</var>. The error array is denoted <var>E</var>. Finally, the total number
of data points is given by <var>N</var> and <var>p</var> is the number of free parameters
in the fit.<br>
<h3>Unweighted FOM functions</h3>
<h4>diff</h4>
Average of the absolute difference between simulation and data.<br>
<br><huge>
FOM<sub>diff</sub> = 1/(N-p) × ∑<sub><var>i</var></sub>
|<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>|
</huge><br>
<h4>log</h4>
Average of the absolute difference between the logarithms (base 10) of the data and the simulation.<br>
<br><huge>
FOM<sub>log</sub> = 1/(N-p) ×∑<sub><var>i</var></sub>
|log<sub>10</sub>(<var>Y<sub>i</sub></var>) -
log<sub>10</sub>(<var>S<sub>i</sub></var>)|
</huge><br>
<h4>sqrt</h4>
Average of the absolute difference between the square roots of the data and the simulation:<br>
<br><huge>
FOM<sub>sqrt</sub> = 1/(N-p) × ∑<sub><var>i</var></sub>
|sqrt(<var>Y<sub>i</sub></var>) - sqrt(<var>S<sub>i</sub></var>)
|
</huge><br>
<h4>R1</h4>
Crystallographic R-factor (often denoted as R1, sometimes called residual factor or reliability factor or the R-value or R<sub>work</sub>).<br>
Gives the percentage of the summed structure factor residuals (absolute difference between data and simulation) over the entire data set with respect to the total sum of measured structure factors. For data sets spanning several orders of magnitude in intensity, R1 is dominated by the residuals at high intensities, while large residuals at low intensities have very little impact on R1.
This implementation here assumes that the loaded data are intensities (squares of the structure factors), hence the square roots of the loaded data are taken for the calculation of R1.<br>
[A.J.C. Wilson, Acta Crystallogr. A32, 994 (1976)]<br>
<br><huge>
FOM<sub>R1</sub> =
∑<sub><var>i</var></sub> [
|sqrt(<var>Y<sub>i</sub></var>) - sqrt(<var>S<sub>i</sub></var>)
| ] / ∑<sub><var>i</var></sub> [ sqrt(<var>Y<sub>i</sub></var>) ]
</huge><br>
<h4>logR1</h4>
The logarithmic R1 factor is a modification of the crystallographic R-factor, calculated using the logarithm (base 10) of the structure factor and simulation. This scaling results in a more equal weighting of high-intensity and low-intensity data points, which can be very helpful when fitting data spanning several orders of magnitude on the y-axis. Essentially, it gives all data points equal weight when displayed in a log-plot.<br>
<br><huge>
FOM<sub>logR1</sub> =
∑<sub><var>i</var></sub> [ |
log<sub>10</sub>(sqrt(<var>Y<sub>i</sub></var>)) -
log<sub>10</sub>(sqrt(<var>S<sub>i</sub></var>))
| ] /
∑<sub><var>i</var></sub> [
log<sub>10</sub>(sqrt(<var>Y<sub>i</sub></var>)) ]
</huge><br>
<h4>R2</h4>
Crystallographic R2 factor. In contrast to R1, this gives the ratio of the total sum of squared deviations to the total sum of squared structure factors. (Note that sometimes R2 is also defined as the square root of the value defined here.)
Like in the case for R1, this implementation assumes that the loaded data are intensities (squares of the structure factors).<br>
[A.J.C. Wilson, Acta Crystallogr. A32, 994 (1976)]<br>
<br><huge>
FOM<sub>R2</sub> =
∑<sub><var>i</var></sub> [
(<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>)<sup>2</sup> ] /
∑<sub><var>i</var></sub> [ <var>Y<sub>i</sub><sup>2</sup></var> ]
</huge><br>
<h4>logR2</h4>
The logarithmic R2 factor is a modification of the crystallographic R2 factor, calculated using the logarithm (base 10) of the structure factor and simulation. This scaling results in a more similar weighting of high-intensity and low-intensity data points, which can be very helpful when fitting data spanning several orders of magnitude on the y-axis. Essentially, it gives all data points equal weight when displayed in a log-plot.<br>
<br><huge>
FOM<sub>logR2</sub> =
∑<sub><var>i</var></sub> [
(log<sub>10</sub>(<var>Y<sub>i</sub></var>) -
log<sub>10</sub>(<var>S<sub>i</sub></var>)
)<sup>2</sup> ] /
∑<sub><var>i</var></sub> [
log<sub>10</sub>(<var>Y<sub>i</sub></var>)<sup>2</sup> ]
</huge><br>
<h4>sintth4</h4>
Gives the average of the absolute differences scaled with a sin(θ)<sup>4</sup> term, where θ = tth/2 is the incidence angle. For reflectivity data, this will divide away the Fresnel reflectivity. <br>
<br><huge>
FOM<sub>sintth4</sub> = 1/(N-p) ×
∑<sub><var>i</var></sub>
|<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>| ×
sin(<var>tth</var>/2)<sup>4</sup>
</huge><br>
<h3>Weighted FOM functions</h3>
<h4>chi2bars</h4>
Chi squared (χ<sup>2</sup>) FOM including error bars<br>
<br><huge>
FOM<sub>chi2bars</sub> = 1/(N-p) × ∑<sub><var>i</var></sub>
((<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>) /
<var>E<sub>i</sub></var>)<sup>2</sup>
</huge><br>
<h4>chibars</h4>
Chi squared but without the squaring! Includes error bars:<br>
<br><huge>
FOM<sub>chibars</sub> = 1/(N-p) × ∑<sub><var>i</var></sub>
|(<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>) /
<var>E<sub>i</sub></var>|
</huge><br>
<h4>logbars</h4>
Absolute logarithmic (base 10) difference, taking errors into account:<br>
<br><huge>
FOM<sub>logbars</sub> = 1/(N-p) × ∑<sub><var>i</var></sub>
|log<sub>10</sub>(<var>Y<sub>i</sub></var>) -
log<sub>10</sub>(<var>S<sub>i</sub></var>)| ×
ln(10) × <var>Y<sub>i</sub></var> / <var>E<sub>i</sub></var>
</huge><br>
<h4>R1bars</h4>
Similar to the crystallographic R-factor R1, but with weighting of the data points by the experimental error values. The error values in E are assumed to be proportional to the standard deviation of the measured intensities.<br>
[A.J.C. Wilson, Acta Crystallogr. A32, 994 (1976), W.C. Hamilton, Acta Crystallogr. 18(3), 502 (1965)]<br>
<br><huge>
FOM<sub>R1bars</sub> =
∑<sub><var>i</var></sub> [ sqrt(1/<var>E<sub>i</sub></var>) ×
|sqrt(<var>Y<sub>i</sub></var>) - sqrt(<var>S<sub>i</sub></var>)| ] /
∑<sub><var>i</var></sub> [ sqrt(1/<var>E<sub>i</sub></var>) ×
sqrt(<var>Y<sub>i</sub></var>) ]
</huge><br>
<h4>R2bars</h4>
Weighted R2 factor. The error values in E are assumed to be proportional to the standard deviation of the measured intensities.<br>
[A.J.C. Wilson, Acta Crystallogr. A32, 994 (1976), W.C. Hamilton, Acta Crystallogr. 18(3), 502 (1965)]<br>
<br><huge>
FOM<sub>R2bars</sub> =
∑<sub><var>i</var></sub> [ (1/<var>E<sub>i</sub></var>) ×
(<var>Y<sub>i</sub></var> - <var>S<sub>i</sub></var>)<sup>2</sup> ] /
∑<sub><var>i</var></sub> [ (1/<var>E<sub>i</sub></var>) ×
<var>Y<sub>i</sub></var><sup>2</sup> ]
</huge><br>
<h2>Customization</h2>
Users can add their own custom-built FOM functions to be used in GenX. For detailed instructions on how to write the code for a custom FOM function and how to include it in the list of FOM functions available to GenX, see the manual at
<a href = "http://apps.sourceforge.net/trac/genx/wiki/DocPages/WriteFom">
http://apps.sourceforge.net/trac/genx/wiki/DocPages/WriteFom </a>
'''
#==============================================================================
import numpy as np
# import also the custom FOM functions defined in fom_funcs_custom.py
# (do nothing if file does not exist)
try:
from fom_funcs_custom import *
#print "Imported custom-defined FOM functions from fom_funcs_custom.py"
except:
pass
#print "Could not find additional custom-defined FOM functions."
#print "Nothing imported. All standard FOM functions are available."
bg_peaks={'00':[0,2,4,6],'02':[-8.2782,-6.2782,-4.2782,-2.2782,-0.2782,1.7218,3.7218,5.7218,7.7218],\
'10':[-7,-5.0,-3.0,3.0,5.0,7],'11':[-6.1391,-4.1391,-2.1391,-0.1391,1.8609,3.8609,5.8609],\
'20':[-8,-6,-4,-2,0,2,4,6,8],'22':[-8.2782,-6.2782,-4.2782,-2.2782,-0.2782,1.7218,3.7218,5.7218,7.7218],\
'30':[-9,-7,-5,-1,1,5,7,9],'2-1':[-8.8609,-6.8609,-4.8609,-0.8609,3.1391,5.1391,7.1391],\
'21':[-7.1391,-5.1391,-3.1391,0.8609,4.8609,6.8609]}
#==============================================================================
# BEGIN FOM function definitions
#=========================
# unweighted FOM functions
def diff(simulations, data):
''' Average absolute difference
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
#return 1.0/(N-1)*np.sum([np.sum(np.abs(dataset.y - sim))\
# for (dataset, sim) in zip(data,simulations) if dataset.use])
return [(dataset.y - sim)
for (dataset, sim) in zip(data,simulations)]
diff.__div_dof__ = True
def log(simulations, data):
    ''' Average absolute logarithmic difference
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [(np.log10(dataset.y)-np.log10(sim))
for (dataset, sim) in zip(data,simulations)]
log.__div_dof__ = True
def sqrt(simulations, data):
''' Average absolute difference of the square root
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [(np.sqrt(dataset.y) - np.sqrt(sim))
for (dataset, sim) in zip(data,simulations)]
sqrt.__div_dof__ = True
def R1(simulations, data):
''' Crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(np.abs(dataset.y))) for dataset in data\
if dataset.use])
return [1.0/denom*(np.sqrt(np.abs(dataset.y)) - np.sqrt(np.abs(sim)))\
for (dataset, sim) in zip(data,simulations)]
def R1_weighted(simulations, data):
''' Crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(np.abs(dataset.y))) for dataset in data\
if dataset.use])
return [1.0/denom*abs(np.sqrt(np.abs(dataset.y)) - np.sqrt(np.abs(sim)))/np.sqrt(np.abs(dataset.y))\
for (dataset, sim) in zip(data,simulations)]
def R1_weighted_2(simulations, data):
''' Crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(np.abs(dataset.y))) for dataset in data\
if dataset.use])
#denom=1
return_list=[]
for (dataset, sim) in zip(data,simulations):
if dataset.x[0]>100:
scaler=np.average(dataset.y[[6,19,32]]/sim[[6,19,32]])
return_list.append(1.0/denom*abs(np.sqrt(np.abs(dataset.y[6:-6])) - np.sqrt(np.abs(sim[6:-6]*scaler)))/np.sqrt(np.abs(dataset.y[6:-6])))
else:
return_list.append(1.0/denom*abs(np.sqrt(np.abs(dataset.y)) - np.sqrt(np.abs(sim)))/np.sqrt(np.abs(dataset.y)))
return return_list
def chi2bars_2(simulations, data):
''' Weighted chi squared
'''
return_list=[]
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
for (dataset,sim) in zip(data,simulations):
if dataset.x[0]>100:
scaler=np.average(dataset.y[6:-6]/sim[6:-6])
return_list.append((dataset.y - sim*scaler)**2/dataset.error**2)
else:
return_list.append((dataset.y - sim)**2/dataset.error**2)
return return_list
chi2bars_2.__div_dof__ = True
def R1_weighted_2b(simulations, data):
''' Crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(np.abs(dataset.y))) for dataset in data\
if dataset.use])
#denom=1
return_list=[]
for (dataset, sim) in zip(data,simulations):
if dataset.x[0]>100:
scaler=np.average(dataset.y[[6,19,32]]/sim[[6,19,32]])
return_list.append(1.0/denom*abs(np.abs(dataset.y[6:-6]) - np.abs(sim[6:-6]*scaler))/np.abs(dataset.y[6:-6]))
else:
return_list.append(1.0/denom*abs(np.sqrt(np.abs(dataset.y)) - np.sqrt(np.abs(sim)))/np.sqrt(np.abs(dataset.y)))
return return_list
def R1_weighted_3(simulations, data):
''' Crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(np.abs(dataset.y))) for dataset in data\
if dataset.use])
#denom=1
return_list=[]
for (dataset, sim) in zip(data,simulations):
if dataset.x[0]>100:
scaler=np.average(dataset.y[[6,19,32]]/sim[[6,19,32]])
return_list.append(1.0/denom*abs(np.log10(np.sqrt(np.abs(dataset.y[6:-6]))) - np.log10(np.sqrt(np.abs(sim[6:-6]*scaler)))))
else:
return_list.append(1.0/denom*abs(np.log10(np.sqrt(np.abs(dataset.y))) - np.log10(np.sqrt(np.abs(sim)))))
return return_list
def logR1(simulations, data):
''' logarithmic crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.log10(np.sqrt(dataset.y))) for dataset in data\
if dataset.use])
return [1.0/denom*(np.log10(np.sqrt(dataset.y)) - \
np.log10(np.sqrt(sim)))\
for (dataset, sim) in zip(data,simulations)]
def R2(simulations, data):
''' Crystallographic R2 factor
'''
denom = np.sum([np.sum(dataset.y**2) for dataset in data\
if dataset.use])
return [1.0/denom*np.sign(dataset.y - sim)*(dataset.y - sim)**2\
for (dataset, sim) in zip(data,simulations)]
def R2_weighted(simulations, data):
''' Crystallographic R2 factor
'''
denom = np.sum([np.sum(dataset.y**2) for dataset in data\
if dataset.use])
return [1.0/denom*np.sign(dataset.y - sim)*(dataset.y - sim)**2/dataset.error**2\
for (dataset, sim) in zip(data,simulations)]
def logR2(simulations, data):
''' logarithmic crystallographic R2 factor
'''
denom = np.sum([np.sum(np.log10(dataset.y)**2) for dataset in data\
if dataset.use])
return [1.0/denom*np.sign(np.log10(dataset.y) - np.log10(sim))*(np.log10(dataset.y) - np.log10(sim))**2\
for (dataset, sim) in zip(data,simulations)]
def sintth4(simulations, data):
    ''' sin(tth/2)^4 scaling of the average absolute difference for reflectivity.
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [np.sin(dataset.x*np.pi/360.0)**4*
(dataset.y - sim)
for (dataset, sim) in zip(data,simulations)]
sintth4.__div_dof__ = True
def Norm(simulations, data):
    ''' Difference normalized by the summed absolute intensity of each dataset
'''
return [1.0/np.sum(np.abs(dataset.y))*(np.sign(dataset.y)*np.abs(dataset.y) - np.sign(sim)*np.abs(sim))\
for (dataset, sim) in zip(data,simulations)]
Norm.__div_dof__ = True
#=======================
# weighted FOM functions
def chi2bars(simulations, data):
''' Weighted chi squared
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [(dataset.y - sim)**2/dataset.error**2 for (dataset, sim) in zip(data,simulations)]
chi2bars.__div_dof__ = True
def chi2bars_w_trainor(simulations, data):
''' Weighted chi squared
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [(dataset.y - sim)**2/(dataset.y*0.2)**2 for (dataset, sim) in zip(data,simulations)]
chi2bars_w_trainor.__div_dof__ = True
#fom's are weighted with dip zones having higher wt number and bragg peak zone having lower wt number
def chi2bars_weighted(simulations, data):
''' Weighted chi squared
'''
def _weight_fom(h,k,l_list=[]):
wt_array=[]
hk=str(int(h))+str(int(k))
for l in l_list:
temp_sign=np.array(bg_peaks[hk])-l
left,right=0,0
for sign in temp_sign:
if sign>=0:
right=list(temp_sign).index(sign)
left=right-1
break
l_mid=(bg_peaks[hk][left]+bg_peaks[hk][right])/2
l_half_span=(bg_peaks[hk][right]-bg_peaks[hk][left])/2
l_span=abs(l-l_mid)
wt_array.append(50/(1+l_span/l_half_span*50))
#print wt_array
return np.array(wt_array)
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [np.sign(dataset.y - sim)*(dataset.y - sim)**2/dataset.error**2*_weight_fom(dataset.extra_data['h'][0],dataset.extra_data['k'][0],dataset.x)
for (dataset, sim) in zip(data,simulations)]
chi2bars_weighted.__div_dof__ = True
def chibars(simulations, data):
''' Weighted chi squared but without the squaring
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [((dataset.y - sim)/dataset.error)
for (dataset, sim) in zip(data,simulations)]
chibars.__div_dof__ = True
def logbars(simulations, data):
''' Weighted average absolute difference of the logarithm of the data
'''
N = np.sum([len(dataset.y)*dataset.use for dataset in data])
return [((np.log10(dataset.y) - np.log10(sim))
/dataset.error*np.log(10)*dataset.y)
for (dataset, sim) in zip(data,simulations)]
logbars.__div_dof__ = True
def R1bars(simulations, data):
''' Weighted crystallographic R-factor (R1)
'''
denom = np.sum([np.sum(np.sqrt(1/dataset.error)*np.sqrt(dataset.y))
for dataset in data if dataset.use])
return [1.0/denom*np.sqrt(1/dataset.error)*
(np.sqrt(dataset.y) - np.sqrt(sim))
for (dataset, sim) in zip(data,simulations)]
def R2bars(simulations, data):
''' Weighted crystallographic R2 factor
'''
denom = np.sum([(1/dataset.error)*np.sum(dataset.y**2)
for dataset in data if dataset.use])
return [1.0/denom*(1/dataset.error) * np.sign(dataset.y - sim)*(dataset.y - sim)**2
for (dataset, sim) in zip(data,simulations)]
# END FOM function definitions
#==============================================================================
# create introspection variables so that everything updates automatically
# Find all objects in this namespace
# (this includes the custom-defined FOM functions from fom_funcs_custom.py)
obj_list = dir()[:]
# find all functions
all_func_names = [s for s in obj_list if type(eval(s)).__name__ == 'function']
func_names = [s for s in all_func_names if s[0] != '_']
# End of file
#==============================================================================
|
jackey-qiu/genx_pc_qiu
|
fom_funcs.py
|
Python
|
gpl-3.0
| 19,343
|
[
"Gaussian"
] |
cc981c5b5c5ea140e7d01b86b396cd75574b5fcc40eb0791a81ca7bb84ec560a
|
# coding: utf-8
from __future__ import unicode_literals, division
from monty.os.path import zpath
import os
import time
import datetime
import operator
import shutil
from functools import reduce
from collections import Counter
import re
import numpy as np
from monty.dev import deprecated
from monty.serialization import loadfn
from custodian.custodian import ErrorHandler
from custodian.utils import backup
from pymatgen.io.vasp import Poscar, VaspInput, Incar, Kpoints, Vasprun, \
Oszicar, Outcar
from pymatgen.transformations.standard_transformations import \
SupercellTransformation
from custodian.ansible.interpreter import Modder
from custodian.ansible.actions import FileActions
from custodian.vasp.interpreter import VaspModder
"""
This module implements specific error handlers for VASP runs. These handlers
try to detect common errors in VASP runs and attempt to fix them on the fly
by modifying the input files.
"""
__author__ = "Shyue Ping Ong, William Davidson Richards, Anubhav Jain, " \
"Wei Chen, Stephen Dacek"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "2/4/13"
VASP_BACKUP_FILES = {"INCAR", "KPOINTS", "POSCAR", "OUTCAR", "CONTCAR",
"OSZICAR", "vasprun.xml", "vasp.out", "std_err.txt"}
class VaspErrorHandler(ErrorHandler):
"""
Master VaspErrorHandler class that handles a number of common errors
that occur during VASP runs.
"""
is_monitor = True
error_msgs = {
"tet": ["Tetrahedron method fails for NKPT<4",
"Fatal error detecting k-mesh",
"Fatal error: unable to match k-point",
"Routine TETIRR needs special values",
"Tetrahedron method fails (number of k-points < 4)"],
"inv_rot_mat": ["inverse of rotation matrix was not found (increase "
"SYMPREC)"],
"brmix": ["BRMIX: very serious problems"],
"subspacematrix": ["WARNING: Sub-Space-Matrix is not hermitian in "
"DAV"],
"tetirr": ["Routine TETIRR needs special values"],
"incorrect_shift": ["Could not get correct shifts"],
"real_optlay": ["REAL_OPTLAY: internal error",
"REAL_OPT: internal ERROR"],
"rspher": ["ERROR RSPHER"],
"dentet": ["DENTET"],
"too_few_bands": ["TOO FEW BANDS"],
"triple_product": ["ERROR: the triple product of the basis vectors"],
"rot_matrix": ["Found some non-integer element in rotation matrix"],
"brions": ["BRIONS problems: POTIM should be increased"],
"pricel": ["internal error in subroutine PRICEL"],
"zpotrf": ["LAPACK: Routine ZPOTRF failed"],
"amin": ["One of the lattice vectors is very long (>50 A), but AMIN"],
"zbrent": ["ZBRENT: fatal internal in",
"ZBRENT: fatal error in bracketing"],
"pssyevx": ["ERROR in subspace rotation PSSYEVX"],
"eddrmm": ["WARNING in EDDRMM: call to ZHEGV failed"],
"edddav": ["Error EDDDAV: Call to ZHEGV failed"],
"grad_not_orth": [
"EDWAV: internal error, the gradient is not orthogonal"],
"nicht_konv": ["ERROR: SBESSELITER : nicht konvergent"],
"zheev": ["ERROR EDDIAG: Call to routine ZHEEV failed!"],
"elf_kpar": ["ELF: KPAR>1 not implemented"],
"elf_ncl": ["WARNING: ELF not implemented for non collinear case"],
"rhosyg": ["RHOSYG internal error"],
"posmap": ["POSMAP internal error: symmetry equivalent atom not found"],
"point_group": ["Error: point group operation missing"]
}
def __init__(self, output_filename="vasp.out", natoms_large_cell=100,
errors_subset_to_catch=None):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
natoms_large_cell (int): Number of atoms threshold to treat cell
as large. Affects the correction of certain errors. Defaults to
100.
            errors_subset_to_catch (list): A subset of errors to catch. The
                default is None, which means all supported errors are detected.
                Use this to catch only a subset of supported errors.
                E.g., ["eddrmm", "zheev"] will only catch the eddrmm and zheev
                errors, and not others. If you wish to exclude one or
                two of the errors, you can create this list by the following
                lines:
                ```
                subset = list(VaspErrorHandler.error_msgs.keys())
                subset.remove("eddrmm")
handler = VaspErrorHandler(errors_subset_to_catch=subset)
```
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
# threshold of number of atoms to treat the cell as large.
self.natoms_large_cell = natoms_large_cell
self.errors_subset_to_catch = errors_subset_to_catch or \
list(VaspErrorHandler.error_msgs.keys())
def check(self):
incar = Incar.from_file("INCAR")
self.errors = set()
with open(self.output_filename, "r") as f:
for line in f:
l = line.strip()
for err, msgs in VaspErrorHandler.error_msgs.items():
if err in self.errors_subset_to_catch:
for msg in msgs:
if l.find(msg) != -1:
# this checks if we want to run a charged
# computation (e.g., defects) if yes we don't
# want to kill it because there is a change in
# e-density (brmix error)
if err == "brmix" and 'NELECT' in incar:
continue
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if self.errors.intersection(["tet", "dentet"]):
actions.append({"dict": "INCAR",
"action": {"_set": {"ISMEAR": 0}}})
if "inv_rot_mat" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-8}}})
if "brmix" in self.errors:
# If there is not a valid OUTCAR already, increment
# error count to 1 to skip first fix
if self.error_count['brmix'] == 0:
try:
assert (Outcar(zpath(os.path.join(
os.getcwd(), "OUTCAR"))).is_stopped is False)
except:
self.error_count['brmix'] += 1
if self.error_count['brmix'] == 0:
# Valid OUTCAR - simply rerun the job and increment
# error count for next time
actions.append({"dict": "INCAR",
"action": {"_set": {"ISTART": 1}}})
self.error_count['brmix'] += 1
elif self.error_count['brmix'] == 1:
# Use Kerker mixing w/default values for other parameters
actions.append({"dict": "INCAR",
"action": {"_set": {"IMIX": 1}}})
self.error_count['brmix'] += 1
elif self.error_count['brmix'] == 2 and vi["KPOINTS"].style \
== Kpoints.supported_modes.Gamma:
actions.append({"dict": "KPOINTS",
"action": {"_set": {"generation_style":
"Monkhorst"}}})
actions.append({"dict": "INCAR",
"action": {"_unset": {"IMIX": 1}}})
self.error_count['brmix'] += 1
elif self.error_count['brmix'] in [2, 3] and vi["KPOINTS"].style \
== Kpoints.supported_modes.Monkhorst:
actions.append({"dict": "KPOINTS",
"action": {"_set": {"generation_style":
"Gamma"}}})
actions.append({"dict": "INCAR",
"action": {"_unset": {"IMIX": 1}}})
self.error_count['brmix'] += 1
if vi["KPOINTS"].num_kpts < 1:
all_kpts_even = all([
bool(n % 2 == 0) for n in vi["KPOINTS"].kpts[0]
])
print("all_kpts_even = {}".format(all_kpts_even))
if all_kpts_even:
new_kpts = (
tuple(n + 1 for n in vi["KPOINTS"].kpts[0]),)
print("new_kpts = {}".format(new_kpts))
actions.append({"dict": "KPOINTS", "action": {"_set": {
"kpoints": new_kpts
}}})
else:
actions.append({"dict": "INCAR",
"action": {"_set": {"ISYM": 0}}})
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append({"dict": "KPOINTS",
"action": {
"_set": {"generation_style": "Gamma"}}})
# Based on VASP forum's recommendation, you should delete the
# CHGCAR and WAVECAR when dealing with this error.
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR",
"action": {
"_file_delete": {'mode': "actual"}}})
actions.append({"file": "WAVECAR",
"action": {
"_file_delete": {'mode': "actual"}}})
if "zpotrf" in self.errors:
# Usually caused by short bond distances. If on the first step,
# volume needs to be increased. Otherwise, it was due to a step
# being too big and POTIM should be decreased. If a static run
# try turning off symmetry.
try:
oszicar = Oszicar("OSZICAR")
nsteps = len(oszicar.ionic_steps)
except:
nsteps = 0
if nsteps >= 1:
potim = float(vi["INCAR"].get("POTIM", 0.5)) / 2.0
actions.append(
{"dict": "INCAR",
"action": {"_set": {"ISYM": 0, "POTIM": potim}}})
elif vi["INCAR"].get("NSW", 0) == 0 \
or vi["INCAR"].get("ISIF", 0) in range(3):
actions.append(
{"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
else:
s = vi["POSCAR"].structure
s.apply_strain(0.2)
actions.append({"dict": "POSCAR",
"action": {"_set": {"structure": s.as_dict()}}})
# Based on VASP forum's recommendation, you should delete the
# CHGCAR and WAVECAR when dealing with this error.
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR",
"action": {"_file_delete": {'mode': "actual"}}})
actions.append({"file": "WAVECAR",
"action": {"_file_delete": {'mode': "actual"}}})
if self.errors.intersection(["subspacematrix"]):
if self.error_count["subspacematrix"] == 0:
actions.append({"dict": "INCAR",
"action": {"_set": {"LREAL": False}}})
else:
actions.append({"dict": "INCAR",
"action": {"_set": {"PREC": "Accurate"}}})
self.error_count["subspacematrix"] += 1
if self.errors.intersection(["rspher", "real_optlay", "nicht_konv"]):
s = vi["POSCAR"].structure
if len(s) < self.natoms_large_cell:
actions.append({"dict": "INCAR",
"action": {"_set": {"LREAL": False}}})
else:
# for large supercell, try an in-between option LREAL = True
# prior to LREAL = False
if self.error_count['real_optlay'] == 0:
# use real space projectors generated by pot
actions.append({"dict": "INCAR",
"action": {"_set": {"LREAL": True}}})
elif self.error_count['real_optlay'] == 1:
actions.append({"dict": "INCAR",
"action": {"_set": {"LREAL": False}}})
self.error_count['real_optlay'] += 1
if self.errors.intersection(["tetirr", "incorrect_shift"]):
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append({"dict": "KPOINTS",
"action": {
"_set": {"generation_style": "Gamma"}}})
if "rot_matrix" in self.errors:
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append({"dict": "KPOINTS",
"action": {
"_set": {"generation_style": "Gamma"}}})
else:
actions.append({"dict": "INCAR",
"action": {"_set": {"ISYM": 0}}})
if "amin" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"AMIN": "0.01"}}})
if "triple_product" in self.errors:
s = vi["POSCAR"].structure
trans = SupercellTransformation(((1, 0, 0), (0, 0, 1), (0, 1, 0)))
new_s = trans.apply_transformation(s)
actions.append({"dict": "POSCAR",
"action": {"_set": {"structure": new_s.as_dict()}},
"transformation": trans.as_dict()})
if "pricel" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-8, "ISYM": 0}}})
if "brions" in self.errors:
potim = float(vi["INCAR"].get("POTIM", 0.5)) + 0.1
actions.append({"dict": "INCAR",
"action": {"_set": {"POTIM": potim}}})
if "zbrent" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"IBRION": 1}}})
actions.append({"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}})
if "too_few_bands" in self.errors:
if "NBANDS" in vi["INCAR"]:
nbands = int(vi["INCAR"]["NBANDS"])
else:
with open("OUTCAR") as f:
for line in f:
if "NBANDS" in line:
try:
d = line.split("=")
nbands = int(d[-1].strip())
break
except (IndexError, ValueError):
pass
actions.append({"dict": "INCAR",
"action": {"_set": {"NBANDS": int(1.1 * nbands)}}})
if "pssyevx" in self.errors:
actions.append({"dict": "INCAR", "action":
{"_set": {"ALGO": "Normal"}}})
if "eddrmm" in self.errors:
# RMM algorithm is not stable for this calculation
if vi["INCAR"].get("ALGO", "Normal") in ["Fast", "VeryFast"]:
actions.append({"dict": "INCAR", "action":
{"_set": {"ALGO": "Normal"}}})
else:
potim = float(vi["INCAR"].get("POTIM", 0.5)) / 2.0
actions.append({"dict": "INCAR",
"action": {"_set": {"POTIM": potim}}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR",
"action": {"_file_delete": {'mode': "actual"}}})
actions.append({"file": "WAVECAR",
"action": {"_file_delete": {'mode': "actual"}}})
if "edddav" in self.errors:
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR",
"action": {"_file_delete": {'mode': "actual"}}})
actions.append({"dict": "INCAR", "action":
{"_set": {"ALGO": "All"}}})
if "grad_not_orth" in self.errors:
if vi["INCAR"].get("ISMEAR", 1) < 0:
actions.append({"dict": "INCAR",
"action": {"_set": {"ISMEAR": "0"}}})
if "zheev" in self.errors:
if vi["INCAR"].get("ALGO", "Fast").lower() != "exact":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Exact"}}})
if "elf_kpar" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"KPAR": 1}}})
if "rhosyg" in self.errors:
if vi["INCAR"].get("SYMPREC", 1e-4) == 1e-4:
actions.append({"dict": "INCAR",
"action": {"_set": {"ISYM": 0}}})
actions.append({"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-4}}})
if "posmap" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-6}}})
if "point_group" in self.errors:
actions.append({"dict": "INCAR",
"action": {"_set": {"ISYM": 0}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class LrfCommutatorHandler(ErrorHandler):
"""
Corrects LRF_COMMUTATOR errors by setting LPEAD=True if not already set.
Note that switching LPEAD=T can slightly change results versus the
default due to numerical evaluation of derivatives.
"""
is_monitor = True
error_msgs = {
"lrf_comm": ["LRF_COMMUTATOR internal error"],
}
def __init__(self, output_filename="std_err.txt"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stderr for vasp
is being redirected. The error messages that are checked are
present in the stderr. Defaults to "std_err.txt", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
def check(self):
self.errors = set()
with open(self.output_filename, "r") as f:
for line in f:
l = line.strip()
for err, msgs in LrfCommutatorHandler.error_msgs.items():
for msg in msgs:
if l.find(msg) != -1:
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "lrf_comm" in self.errors:
if Outcar(zpath(os.path.join(
os.getcwd(), "OUTCAR"))).is_stopped is False:
if not vi["INCAR"].get("LPEAD"):
actions.append({"dict": "INCAR",
"action": {"_set": {"LPEAD": True}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class StdErrHandler(ErrorHandler):
"""
Master StdErr class that handles a number of common errors
that occur during VASP runs with error messages only in
the standard error.
"""
is_monitor = True
error_msgs = {
"kpoints_trans": ["internal error in GENERATE_KPOINTS_TRANS: "
"number of G-vector changed in star"],
"out_of_memory": ["Allocation would exceed memory limit"]
}
def __init__(self, output_filename="std_err.txt"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stderr for vasp
is being redirected. The error messages that are checked are
present in the stderr. Defaults to "std_err.txt", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
def check(self):
self.errors = set()
with open(self.output_filename, "r") as f:
for line in f:
l = line.strip()
for err, msgs in StdErrHandler.error_msgs.items():
for msg in msgs:
if l.find(msg) != -1:
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "kpoints_trans" in self.errors:
if self.error_count["kpoints_trans"] == 0:
                # Replace the mesh with a uniform m x m x m grid with a
                # comparable total number of k-points.
                m = reduce(operator.mul, vi["KPOINTS"].kpts[0])
                m = max(int(round(m ** (1 / 3))), 1)
                if vi["KPOINTS"].style.name.lower().startswith("m"):
                    m += m % 2  # keep Monkhorst-Pack grids even
actions.append({"dict": "KPOINTS",
"action": {"_set": {"kpoints": [[m] * 3]}}})
self.error_count['kpoints_trans'] += 1
if "out_of_memory" in self.errors:
if vi["INCAR"].get("KPAR", 1) > 1:
reduced_kpar = max(vi["INCAR"].get("KPAR", 1) // 2, 1)
actions.append({"dict": "INCAR",
"action": {"_set": {"KPAR": reduced_kpar}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class AliasingErrorHandler(ErrorHandler):
"""
Master VaspErrorHandler class that handles a number of common errors
that occur during VASP runs.
"""
is_monitor = True
error_msgs = {
"aliasing": [
"WARNING: small aliasing (wrap around) errors must be expected"],
"aliasing_incar": ["Your FFT grids (NGX,NGY,NGZ) are not sufficient "
"for an accurate"]
}
def __init__(self, output_filename="vasp.out"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
def check(self):
incar = Incar.from_file("INCAR")
self.errors = set()
with open(self.output_filename, "r") as f:
for line in f:
l = line.strip()
for err, msgs in AliasingErrorHandler.error_msgs.items():
for msg in msgs:
if l.find(msg) != -1:
# this checks if we want to run a charged
# computation (e.g., defects) if yes we don't
# want to kill it because there is a change in e-
# density (brmix error)
if err == "brmix" and 'NELECT' in incar:
continue
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "aliasing" in self.errors:
with open("OUTCAR") as f:
grid_adjusted = False
changes_dict = {}
r = re.compile(".+aliasing errors.*(NG.)\s*to\s*(\d+)")
for line in f:
m = r.match(line)
if m:
changes_dict[m.group(1)] = int(m.group(2))
grid_adjusted = True
# Ensure that all NGX, NGY, NGZ have been checked
if grid_adjusted and 'NGZ' in line:
actions.append(
{"dict": "INCAR", "action": {"_set": changes_dict}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.extend([{"file": "CHGCAR",
"action": {"_file_delete": {
'mode': "actual"}}},
{"file": "WAVECAR",
"action": {"_file_delete": {
'mode': "actual"}}}])
break
if "aliasing_incar" in self.errors:
# vasp seems to give different warnings depending on whether the
# aliasing error was caused by user supplied inputs
d = {k: 1 for k in ['NGX', 'NGY', 'NGZ'] if k in vi['INCAR'].keys()}
actions.append({"dict": "INCAR", "action": {"_unset": d}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.extend([{"file": "CHGCAR",
"action": {
"_file_delete": {'mode': "actual"}}},
{"file": "WAVECAR",
"action": {
"_file_delete": {'mode': "actual"}}}])
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class DriftErrorHandler(ErrorHandler):
"""
Corrects for total drift exceeding the force convergence criteria.
"""
def __init__(self, max_drift=None, to_average=3, enaug_multiply=2):
"""
Initializes the handler with max drift
Args:
            max_drift (float): This defines the max drift. Leaving this at
                the default of None gets the max_drift from EDIFFG.
            to_average (int): Number of trailing ionic steps over which the
                drift is averaged.
            enaug_multiply (float): Factor by which ENAUG is multiplied once
                ADDGRID and PREC = High have already been tried.
"""
self.max_drift = max_drift
self.to_average = int(to_average)
self.enaug_multiply = enaug_multiply
def check(self):
incar = Incar.from_file("INCAR")
if incar.get("EDIFFG", 0.1) >= 0 or incar.get("NSW", 0) == 0:
# Only activate when force relaxing and ionic steps
# NSW check prevents accidental effects when running DFPT
return False
if not self.max_drift:
self.max_drift = incar["EDIFFG"] * -1
try:
outcar = Outcar("OUTCAR")
except:
# Can't perform check if Outcar not valid
return False
if len(outcar.data.get('drift', [])) < self.to_average:
# Ensure enough steps to get average drift
return False
else:
curr_drift = outcar.data.get("drift", [])[::-1][:self.to_average]
curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
return curr_drift > self.max_drift
def correct(self):
backup(VASP_BACKUP_FILES)
actions = []
vi = VaspInput.from_directory(".")
incar = vi["INCAR"]
outcar = Outcar("OUTCAR")
# Move CONTCAR to POSCAR
actions.append({"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}})
# First try adding ADDGRID
if not incar.get("ADDGRID", False):
actions.append({"dict": "INCAR",
"action": {"_set": {"ADDGRID": True}}})
# Otherwise set PREC to High so ENAUG can be used to control Augmentation Grid Size
elif incar.get("PREC", "Accurate").lower() != "high":
actions.append({"dict": "INCAR",
"action": {"_set": {"PREC": "High"}}})
actions.append({"dict": "INCAR",
"action": {"_set": {"ENAUG": incar.get("ENCUT", 520) * 2}}})
# PREC is already high and ENAUG set so just increase it
else:
actions.append({"dict": "INCAR",
"action": {"_set": {"ENAUG": int(incar.get("ENAUG", 1040) * self.enaug_multiply)}}})
curr_drift = outcar.data.get("drift", [])[::-1][:self.to_average]
curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
VaspModder(vi=vi).apply_actions(actions)
return {"errors": "Excessive drift {} > {}".format(curr_drift, self.max_drift), "actions": actions}
class MeshSymmetryErrorHandler(ErrorHandler):
"""
Corrects the mesh symmetry error in VASP. This error is sometimes
non-fatal. So this error handler only checks at the end of the run,
and if the run has converged, no error is recorded.
"""
is_monitor = False
def __init__(self, output_filename="vasp.out",
output_vasprun="vasprun.xml"):
"""
Initializes the handler with the output files to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
output_vasprun (str): Filename for the vasprun.xml file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
self.output_vasprun = output_vasprun
def check(self):
msg = "Reciprocal lattice and k-lattice belong to different class of" \
" lattices."
vi = VaspInput.from_directory('.')
# According to VASP admins, you can disregard this error
# if symmetry is off
# Also disregard if automatic KPOINT generation is used
if (not vi["INCAR"].get('ISYM', True)) or \
vi[
"KPOINTS"].style == Kpoints.supported_modes.Automatic:
return False
try:
v = Vasprun(self.output_vasprun)
if v.converged:
return False
except:
pass
with open(self.output_filename, "r") as f:
for line in f:
l = line.strip()
if l.find(msg) != -1:
return True
return False
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
m = reduce(operator.mul, vi["KPOINTS"].kpts[0])
m = max(int(round(m ** (1 / 3))), 1)
if vi["KPOINTS"].style.name.lower().startswith("m"):
m += m % 2
actions = [{"dict": "KPOINTS",
"action": {"_set": {"kpoints": [[m] * 3]}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["mesh_symmetry"], "actions": actions}
class UnconvergedErrorHandler(ErrorHandler):
"""
Check if a run is converged.
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml"):
"""
Initializes the handler with the output file to check.
Args:
            output_filename (str): Filename for the vasprun.xml file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
try:
v = Vasprun(self.output_filename)
if not v.converged:
return True
except:
pass
return False
def correct(self):
v = Vasprun(self.output_filename)
actions = []
if not v.converged_electronic:
            # Ladder from VeryFast to Fast to Normal to All.
            # These progressively switch to more stable but more
            # expensive algorithms.
algo = v.incar.get("ALGO", "Normal")
if algo == "VeryFast":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Fast"}}})
elif algo == "Fast":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Normal"}}})
elif algo == "Normal":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "All"}}})
else:
# Try mixing as last resort
new_settings = {"ISTART": 1,
"ALGO": "Normal",
"NELMDL": -6,
"BMIX": 0.001,
"AMIX_MAG": 0.8,
"BMIX_MAG": 0.001}
if not all([v.incar.get(k, "") == val for k, val in new_settings.items()]):
actions.append({"dict": "INCAR",
"action": {"_set": new_settings}})
elif not v.converged_ionic:
# Just continue optimizing and let other handles fix ionic
# optimizer parameters
actions.append({"dict": "INCAR",
"action": {"_set": {"IBRION": 1}}})
actions.append({"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}})
if actions:
vi = VaspInput.from_directory(".")
backup(VASP_BACKUP_FILES)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Unconverged"], "actions": actions}
else:
# Unfixable error. Just return None for actions.
return {"errors": ["Unconverged"], "actions": None}
class MaxForceErrorHandler(ErrorHandler):
"""
Checks that the desired force convergence has been achieved. Otherwise
    restarts the run with a smaller EDIFFG. (This is necessary since energy
    and force convergence criteria cannot be set simultaneously.)
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml",
max_force_threshold=0.25):
"""
Args:
            output_filename (str): name to look for the vasprun
max_force_threshold (float): Threshold for max force for
restarting the run. (typically should be set to the value
that the creator looks for)
"""
self.output_filename = output_filename
self.max_force_threshold = max_force_threshold
def check(self):
try:
v = Vasprun(self.output_filename)
forces = np.array(v.ionic_steps[-1]['forces'])
sdyn = v.final_structure.site_properties.get('selective_dynamics')
if sdyn:
forces[np.logical_not(sdyn)] = 0
max_force = max(np.linalg.norm(forces, axis=1))
if max_force > self.max_force_threshold and v.converged is True:
return True
except:
pass
return False
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
ediff = float(vi["INCAR"].get("EDIFF", 1e-4))
ediffg = float(vi["INCAR"].get("EDIFFG", ediff * 10))
actions = [{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}},
{"dict": "INCAR",
"action": {"_set": {"EDIFFG": ediffg * 0.5}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["MaxForce"], "actions": actions}
class PotimErrorHandler(ErrorHandler):
"""
Check if a run has excessively large positive energy changes.
This is typically caused by too large a POTIM. Runs typically
end up crashing with some other error (e.g. BRMIX) as the geometry
gets progressively worse.
"""
is_monitor = True
def __init__(self, input_filename="POSCAR", output_filename="OSZICAR",
dE_threshold=1):
"""
Initializes the handler with the input and output files to check.
Args:
input_filename (str): This is the POSCAR file that the run
started from. Defaults to "POSCAR". Change
this only if it is different from the default (unlikely).
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
dE_threshold (float): The threshold energy change. Defaults to 1eV.
"""
self.input_filename = input_filename
self.output_filename = output_filename
self.dE_threshold = dE_threshold
def check(self):
try:
oszicar = Oszicar(self.output_filename)
n = len(Poscar.from_file(self.input_filename).structure)
max_dE = max([s['dE'] for s in oszicar.ionic_steps[1:]]) / n
if max_dE > self.dE_threshold:
return True
except:
return False
def correct(self):
backup(VASP_BACKUP_FILES)
vi = VaspInput.from_directory(".")
potim = float(vi["INCAR"].get("POTIM", 0.5))
ibrion = int(vi["INCAR"].get("IBRION", 0))
if potim < 0.2 and ibrion != 3:
actions = [{"dict": "INCAR",
"action": {"_set": {"IBRION": 3,
"SMASS": 0.75}}}]
elif potim < 0.1:
actions = [{"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-8}}}]
else:
actions = [{"dict": "INCAR",
"action": {"_set": {"POTIM": potim * 0.5}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["POTIM"], "actions": actions}
class FrozenJobErrorHandler(ErrorHandler):
"""
    Detects an error when the output file has not been updated
    in timeout seconds. Changes ALGO from Fast to Normal, or tightens
    SYMPREC if ALGO is already Normal.
"""
is_monitor = True
def __init__(self, output_filename="vasp.out", timeout=21600):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
timeout (int): The time in seconds between checks where if there
is no activity on the output file, the run is considered
                frozen. Defaults to 21600 seconds, i.e., 6 hours.
"""
self.output_filename = output_filename
self.timeout = timeout
def check(self):
st = os.stat(self.output_filename)
if time.time() - st.st_mtime > self.timeout:
return True
def correct(self):
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory('.')
actions = []
if vi["INCAR"].get("ALGO", "Normal") == "Fast":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Normal"}}})
else:
actions.append({"dict": "INCAR",
"action": {"_set": {"SYMPREC": 1e-8}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Frozen job"], "actions": actions}
class NonConvergingErrorHandler(ErrorHandler):
"""
Check if a run is hitting the maximum number of electronic steps at the
    last nionic_steps ionic steps (default=10). If so, walk ALGO up the
    VeryFast -> Fast -> Normal -> All ladder or adjust the mixing
    parameters; if nothing is left to try, give up.
"""
is_monitor = True
def __init__(self, output_filename="OSZICAR", nionic_steps=10):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
nionic_steps (int): The threshold number of ionic steps that
needs to hit the maximum number of electronic steps for the
run to be considered non-converging.
"""
self.output_filename = output_filename
self.nionic_steps = nionic_steps
def check(self):
vi = VaspInput.from_directory(".")
nelm = vi["INCAR"].get("NELM", 60)
try:
oszicar = Oszicar(self.output_filename)
esteps = oszicar.electronic_steps
if len(esteps) > self.nionic_steps:
return all([len(e) == nelm
for e in esteps[-(self.nionic_steps + 1):-1]])
except:
pass
return False
def correct(self):
vi = VaspInput.from_directory(".")
algo = vi["INCAR"].get("ALGO", "Normal")
amix = vi["INCAR"].get("AMIX", 0.4)
bmix = vi["INCAR"].get("BMIX", 1.0)
amin = vi["INCAR"].get("AMIN", 0.1)
actions = []
        # Ladder from VeryFast to Fast to Normal to All.
        # These progressively switch to more stable but more
        # expensive algorithms.
if algo == "VeryFast":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Fast"}}})
elif algo == "Fast":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "Normal"}}})
elif algo == "Normal":
actions.append({"dict": "INCAR",
"action": {"_set": {"ALGO": "All"}}})
elif amix > 0.1 and bmix > 0.01:
# Try linear mixing
actions.append({"dict": "INCAR",
"action": {"_set": {"AMIX": 0.1, "BMIX": 0.01,
"ICHARG": 2}}})
elif bmix < 3.0 and amin > 0.01:
# Try increasing bmix
actions.append({"dict": "INCAR",
"action": {"_set": {"AMIN": 0.01, "BMIX": 3.0,
"ICHARG": 2}}})
if actions:
backup(VASP_BACKUP_FILES)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
# Unfixable error. Just return None for actions.
else:
return {"errors": ["Non-converging job"], "actions": None}
class WalltimeHandler(ErrorHandler):
"""
    Check if a run is nearing the walltime. If so, write a STOPCAR with
    LSTOP or LABORT = .True.. The walltime can be passed explicitly to the
    constructor (which is unfortunately necessary for SGE and SLURM systems).
    If the job is running on a PBS system and the PBS_WALLTIME variable is in
    the run environment, the walltime will be determined automatically if it
    is not set.
"""
is_monitor = True
# The WalltimeHandler should not terminate as we want VASP to terminate
# itself naturally with the STOPCAR.
is_terminating = False
# This handler will be unrecoverable, but custodian shouldn't raise an
# error
raises_runtime_error = False
def __init__(self, wall_time=None, buffer_time=300,
electronic_step_stop=False):
"""
Initializes the handler with a buffer time.
Args:
wall_time (int): Total walltime in seconds. If this is None and
the job is running on a PBS system, the handler will attempt to
determine the walltime from the PBS_WALLTIME environment
variable. If the wall time cannot be determined or is not
set, this handler will have no effect.
buffer_time (int): The min amount of buffer time in secs at the
end that the STOPCAR will be written. The STOPCAR is written
                when the time remaining is < the higher of 3 x the longest
                observed ionic step time and the buffer time. Defaults to
300 secs, which is the default polling time of Custodian.
This is typically sufficient for the current ionic step to
complete. But if other operations are being performed after
the run has stopped, the buffer time may need to be increased
accordingly.
electronic_step_stop (bool): Whether to check for electronic steps
instead of ionic steps (e.g. for static runs on large systems or
static HSE runs, ...). Be careful that results such as density
or wavefunctions might not be converged at the electronic level.
Should be used with LWAVE = .True. to be useful. If this is
True, the STOPCAR is written with LABORT = .TRUE. instead of
LSTOP = .TRUE.
"""
if wall_time is not None:
self.wall_time = wall_time
elif "PBS_WALLTIME" in os.environ:
self.wall_time = int(os.environ["PBS_WALLTIME"])
elif "SBATCH_TIMELIMIT" in os.environ:
self.wall_time = int(os.environ["SBATCH_TIMELIMIT"])
else:
self.wall_time = None
self.buffer_time = buffer_time
# Sets CUSTODIAN_WALLTIME_START as the start time to use for
# future jobs in the same batch environment. Can also be
# set manually be the user in the batch environment.
if "CUSTODIAN_WALLTIME_START" in os.environ:
self.start_time = datetime.datetime.strptime(
os.environ["CUSTODIAN_WALLTIME_START"], "%a %b %d %H:%M:%S %Z %Y")
else:
self.start_time = datetime.datetime.now()
os.environ["CUSTODIAN_WALLTIME_START"] = datetime.datetime.strftime(
self.start_time, "%a %b %d %H:%M:%S UTC %Y")
self.electronic_step_stop = electronic_step_stop
self.electronic_steps_timings = [0]
self.prev_check_time = self.start_time
def check(self):
if self.wall_time:
run_time = datetime.datetime.now() - self.start_time
total_secs = run_time.total_seconds()
outcar = Outcar("OUTCAR")
if not self.electronic_step_stop:
# Determine max time per ionic step.
outcar.read_pattern({"timings": "LOOP\+.+real time(.+)"},
postprocess=float)
time_per_step = np.max(outcar.data.get('timings')) if outcar.data.get("timings", []) else 0
else:
# Determine max time per electronic step.
outcar.read_pattern({"timings": "LOOP:.+real time(.+)"},
postprocess=float)
time_per_step = np.max(outcar.data.get('timings')) if outcar.data.get("timings", []) else 0
            # If the remaining time is less than 3 x the longest step time
            # or the buffer time, ask VASP to stop via a STOPCAR.
time_left = self.wall_time - total_secs
if time_left < max(time_per_step * 3, self.buffer_time):
return True
return False
def correct(self):
content = "LSTOP = .TRUE." if not self.electronic_step_stop else \
"LABORT = .TRUE."
# Write STOPCAR
actions = [{"file": "STOPCAR",
"action": {"_file_create": {'content': content}}}]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
return {"errors": ["Walltime reached"], "actions": None}
@deprecated(replacement=WalltimeHandler)
class PBSWalltimeHandler(WalltimeHandler):
def __init__(self, buffer_time=300):
super(PBSWalltimeHandler, self).__init__(None, buffer_time=buffer_time)
class CheckpointHandler(ErrorHandler):
"""
This is not an error handler per se, but rather a checkpointer. What this
does is that every X seconds, a STOPCAR and CHKPT will be written. This
forces VASP to stop at the end of the next ionic step. The files are then
    copied into a subdir, and then the job is restarted. To use this properly,
    max_errors in Custodian must be set to a very high value, and you
    probably wouldn't want to use any standard VASP error handlers. The
    checkpoint will be stored in subdirs chk_#. This should be used in
    combination with the StoppedRunHandler.
"""
is_monitor = True
# The CheckpointHandler should not terminate as we want VASP to terminate
# itself naturally with the STOPCAR.
is_terminating = False
def __init__(self, interval=3600):
"""
Initializes the handler with an interval.
Args:
interval (int): Interval at which to checkpoint in seconds.
Defaults to 3600 (1 hr).
"""
self.interval = interval
self.start_time = datetime.datetime.now()
self.chk_counter = 0
def check(self):
run_time = datetime.datetime.now() - self.start_time
total_secs = run_time.seconds + run_time.days * 3600 * 24
if total_secs > self.interval:
return True
return False
def correct(self):
content = "LSTOP = .TRUE."
chkpt_content = "Index: %d\nTime: \"%s\"" % (self.chk_counter,
datetime.datetime.now())
self.chk_counter += 1
# Write STOPCAR
actions = [{"file": "STOPCAR",
"action": {"_file_create": {'content': content}}},
{"file": "chkpt.yaml",
"action": {"_file_create": {'content': chkpt_content}}}]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
# Reset the clock.
self.start_time = datetime.datetime.now()
return {"errors": ["Checkpoint reached"], "actions": actions}
def __str__(self):
return "CheckpointHandler with interval %d" % self.interval
class StoppedRunHandler(ErrorHandler):
"""
    This is not an error handler per se, but rather the restart half of the
    checkpointing scheme. It checks for the chkpt.yaml written by
    CheckpointHandler, archives the stopped run into a vasp.chk.# tarball,
    and copies CONTCAR to POSCAR so the job can be restarted. To use this
    properly, max_errors in Custodian must be set to a very high value, and
    you probably wouldn't want to use any standard VASP error handlers. This
    should be used in combination with the CheckpointHandler.
"""
is_monitor = False
# The CheckpointHandler should not terminate as we want VASP to terminate
# itself naturally with the STOPCAR.
is_terminating = False
def __init__(self):
pass
def check(self):
return os.path.exists("chkpt.yaml")
def correct(self):
d = loadfn("chkpt.yaml")
i = d["Index"]
name = shutil.make_archive(
os.path.join(os.getcwd(), "vasp.chk.%d" % i), "gztar")
actions = [{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}}]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
actions.append({"Checkpoint": name})
return {"errors": ["Stopped run."],
"actions": actions}
class PositiveEnergyErrorHandler(ErrorHandler):
"""
    Check if a run has a positive final energy.
    If so, change ALGO from Fast to Normal or halve POTIM; otherwise the
    error is treated as unfixable.
"""
is_monitor = True
def __init__(self, output_filename="OSZICAR"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
try:
oszicar = Oszicar(self.output_filename)
if oszicar.final_energy > 0:
return True
except:
pass
return False
def correct(self):
# change ALGO = Fast to Normal if ALGO is !Normal
vi = VaspInput.from_directory(".")
algo = vi["INCAR"].get("ALGO", "Normal")
if algo.lower() not in ['normal', 'n']:
backup(VASP_BACKUP_FILES)
actions = [{"dict": "INCAR",
"action": {"_set": {"ALGO": "Normal"}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Positive energy"], "actions": actions}
elif algo == "Normal":
potim = float(vi["INCAR"].get("POTIM", 0.5)) / 2.0
actions = [{"dict": "INCAR",
"action": {"_set": {"POTIM": potim}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Positive energy"], "actions": actions}
# Unfixable error. Just return None for actions.
else:
return {"errors": ["Positive energy"], "actions": None}
|
specter119/custodian
|
custodian/vasp/handlers.py
|
Python
|
mit
| 54,519
|
[
"VASP",
"pymatgen"
] |
465bf2764da34d3fd4d2539c0f19ae5df39fce9ef83ef83883b7fc43dbcebccc
|
from compositecore import Leaf
class DataPoint(Leaf):
"""
Class for components holding a single data point.
"""
    def __init__(self, component_type, value, tags=()):
super(DataPoint, self).__init__()
self.tags |= set(tags)
self.component_type = component_type
self.value = value
class Flag(Leaf):
"""
    Component which only has a component type. Composites with this component have this flag.
"""
def __init__(self, component_type):
super(Flag, self).__init__()
self.component_type = component_type
class DataPointBonusSpoof(Leaf):
"""
    Defines a bonus value. If this component is added to an entity as a
    spoof, the entity will get that bonus added to its normal value.
"""
def __init__(self, component_type, bonus_value):
super(DataPointBonusSpoof, self).__init__()
self.component_type = component_type
self.bonus_value = bonus_value
@property
def value(self):
return self.next.value + self.bonus_value
@value.setter
def value(self, new_value):
self.next.value = new_value
class Damage(Leaf):
"""
Holds min and max damage.
"""
def __init__(self, min, max):
super(Damage, self).__init__()
self.component_type = "damage_data_point"
self.min = min
self.max = max
class Class:
ROGUE = "Rogue"
KNIGHT = "Knight"
GUNSLINGER = "Gunslinger"
WITCH = "Witch"
TINKER = "Tinker"
class Races:
HUMAN = "Human"
RATMAN = "Ratman"
CYCLOPS = "Cyclops"
PIXIE = "Pixie"
class Tags:
DAMAGE_TYPE = "damage_type"
class DataTypes:
CLASS = "job"
RACE = "race"
ENERGY = "energy"
CRIT_MULTIPLIER = "crit_multiplier"
UNARMED_CRIT_CHANCE = "unarmed_crit_chance"
CRIT_CHANCE = "crit_chance"
CRIT_CHANCE_WEAPON = "crit_chance_weapon_effect"
STRENGTH = "strength"
ARMOR = "armor"
ACCURACY = "accuracy"
DAMAGE = "damage"
STEALTH = "stealth"
AWARENESS = "awareness"
EVASION = "evasion"
COUNTER_ATTACK_CHANCE = "counter_attack_chance"
OFFENCIVE_ATTACK_CHANCE = "offencive_attack_chance"
DEFENCIVE_ATTACK_CHANCE = "defencive_attack_chance"
MELEE_SPEED = "melee_speed"
SHOOT_SPEED = "shoot_speed"
THROW_SPEED = "throw_speed"
THROW_ITEM_SPEED = "throw_item_speed"
CAST_SPEED = "cast_speed"
MELEE_DAMAGE_MULTIPLIER = "melee_damage_multiplier"
THROW_DAMAGE_MULTIPLIER = "throw_damage_multiplier"
INTELLIGENCE = "intelligence"
GAME_PIECE_TYPE = "game_piece_type"
MOVEMENT_SPEED = "movement_speed"
FACTION = "faction"
WEIGHT = "weight"
WEAPON_RANGE = "weapon_range"
SIGHT_RADIUS = "sight_radius"
SKIP_ACTION_CHANCE = "skip_action_chance"
DENSITY = "density"
CLOUD_TYPE = "cloud_type"
CLONE_FUNCTION = "clone_function"
MINIMUM_DEPTH = "minimum_depth"
GAME_STATE = "game_state"
class Immunities(object):
SPIDER_WEB = "spider_web_immunity"
class IntelligenceLevel(DataPoint):
MINDLESS = 0
PLANT = 1
ANIMAL = 2
NORMAL = 3
HIGH = 4
class Factions(DataPoint):
PLAYER = 0
MONSTER = 1
class GamePieceTypes(DataPoint):
ENTITY = 0
CLOUD = 1
ITEM = 2
DUNGEON_FEATURE = 3
DUNGEON_TRASH = 4
TERRAIN = 5
MAX_INSTANCES_ON_TILE = {ENTITY: 1,
CLOUD: 1,
ITEM: 1,
DUNGEON_FEATURE: 1,
DUNGEON_TRASH: 1,
TERRAIN: 1}
def max_instances_of_composite_on_tile(composite):
return GamePieceTypes.MAX_INSTANCES_ON_TILE[composite.game_piece_type.value]
class UnArmedHitTargetEntityEffectFactory(DataPoint):
def __init__(self, effect_factory_function):
super(UnArmedHitTargetEntityEffectFactory, self).__init__("unarmed_hit_target_entity_effect_factory_" +
str(effect_factory_function), effect_factory_function)
self.tags.add("unarmed_hit_target_entity_effect_factory")
|
co/TheLastRogue
|
stats.py
|
Python
|
bsd-2-clause
| 4,107
|
[
"TINKER"
] |
62712b18fd7e8d67d70733046d13460093e8ea9bd9247741e9d2fadac2e61455
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.dates as mdates
from dateutil.parser import parse
from datetime import datetime
from datetime import timedelta
# Python 2 and 3: easiest option
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import pytz
import codecs
from matplotlib.backends.backend_pdf import PdfPages
from scipy import optimize
from numpy import asarray as ar, exp  # scipy no longer re-exports these numpy functions
from scipy.integrate import quad
import pandas as pd
from pandas import DataFrame
#--------------------------------------------------------------------------#
# Fit Functions
#--------------------------------------------------------------------------#
def lbound(bound,par):
return 1e4*np.sqrt(bound-par) + 1e-3*(bound-par) if (par<bound) else 0
def ubound(bound,par):
return 1e4*np.sqrt(par-bound) + 1e-3*(par-bound) if (par>bound) else 0
def bound(bounds,par):
return lbound(bounds[0],par) + ubound(bounds[1],par)
def fixed(fix,par):
return bound((fix,fix), par)
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))+lbound(0,a)+lbound(0,sigma)+lbound(0,x0)
def expo(x,a,slope):
return a*exp(x*slope)
# p = [a1,mean,sigma,a2,slope]
def gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+expo(x,p[3],p[4])
# p = [a1,mean,sigma,slope,const]
def gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+p[3]*x+p[4]
def gaus_plus_const(x,p):
return gaus(x,p[0],p[1],p[2])+p[3]
def double_gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+expo(x,p[6],p[7])
def double_gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+p[6]*x+p[7]
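# Quick sanity check of the fit machinery on synthetic data (illustrative
# parameter values only); uses the same leastsq pattern as the peak finders
# below. Kept behind a flag so importing this module stays side-effect free.
if False:  # flip to True to run the self-check
    x = ar(range(100, 300))
    true_p = [500.0, 200.0, 8.0, 50.0, -0.01]
    y = gaus_plus_exp(x, true_p) + np.random.normal(0, 5, len(x))
    errfunc = lambda p, x, y: gaus_plus_exp(x, p) - y
    pfit, _ = optimize.leastsq(errfunc, [400.0, 190.0, 5.0, 40.0, -0.005],
                               args=(x, y))
    print('recovered mean: {:.1f} (true 200.0)'.format(pfit[1]))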
#--------------------------------------------------------------------------#
# Process input data
#--------------------------------------------------------------------------#
def make_int(lst):
#Makes all entries of a list an integer
y = []
for i in lst:
y.append(int(i))
return y
def make_array(lst,low=10,high=1032):
'''
    Makes a list into an array, splicing out the parts irrelevant
    for a spectrum. Set the lower and upper bounds of the required data
    for each isotope from the input CSV file.
'''
z = np.asarray(make_int(lst[low:high]))
return z
def get_times(rows, number, n=1):
'''
Get list of times for data: determines time as the midpoint between the upper and lower bounds in the integration window
Arguments:
- full list of inputs from data csv
- number of days to collect data over
- number of hours to integrate over
Returns:
- list of times
'''
entries = 600*n
days = (144/n)
i = 0
counter = 0
times = []
while i < number*days:
if counter < days:
time_range = []
integration = rows[(i*entries)+1:((i+1)*entries)+1]
for j in integration:
if len(j) > 1:
this_time = int(j[10])/1000.0
time_range.append(datetime.fromtimestamp(this_time))
if len(time_range) > 0:
times.append(time_range[int(len(time_range)/2)])
counter+=1
i+=1
else:
print('finished', i)
counter = 0
print('finished', i)
counter = 0
print(times)
return times
def double_peak_finder(array,lower,upper):
'''
Fits double gaussian + exponential to data within some window
- fit is applied only to data within the upper/lower channel
boundaries provided as inputs
Arguments:
- full array of data
- lower and upper channel values for the fit window
Returns:
- list of fit parameters and list of parameter errors
'''
points = ar(range(lower,upper))
peak = list(array[lower:upper])
counts = ar(peak)
# Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
# - mean estimated as center of fit window - set window accordingly
# - double gaussian means shifted slightly in each direction
# - gaussian amp and expo shift estimated based on counts at left edge
# - expo slope determined using fit window boundaries
nentries = len(points)
mean = lower + (upper - lower)/2.0
slope = 2*(np.log(counts[-1])-np.log(counts[0]))/(points[-1]-points[0])
pinit = [counts[0]/5.0,mean-2,5.0,counts[0]/5.0,mean+2,5.0,counts[0],slope]
# Currently using leastsq fit from scipy
# - see scipy documentation for more information
errfunc = lambda p, x, y: double_gaus_plus_exp(x,p) - y
pfit,pcov,infodict,errmsg,success = \
optimize.leastsq(errfunc, pinit, args=(points,counts), \
full_output=1, epsfcn=0.0001)
# Calculate fit parameter uncertainties using the covariance matrix
# and the (fit - data) variance
if (len(counts) > len(pinit)) and pcov is not None:
s_sq = (errfunc(pfit, points, counts)**2).sum()/(len(counts)-len(pinit))
pcov = pcov * s_sq
else:
pcov = 0
error = []
for i in range(len(pfit)):
try:
# This conditional is bad!!
# Artificially sets error to zero if it's too big - remove now!
if np.absolute(pcov[i][i])**0.5 > np.absolute(pfit[i]):
error.append( 0.00 )
else:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
pfit_leastsq = pfit
perr_leastsq = np.array(error)
return pfit_leastsq, perr_leastsq
def peak_finder(array,lower,upper,count_offset):
'''
Fits gaussian + exponential to data within some window
- fit is applied only to data within the upper/lower channel
boundaries provided as inputs
Arguments:
- full array of data
- lower and upper channel values for the fit window
- count_offset used to correct exponential fit parameter for the fact that the fit is not starting at the left edge of the spectrum
Returns:
- list of fit parameters and list of parameter errors
'''
points = ar(range(lower,upper))
peak = list(array[lower:upper])
counts = ar(peak)
# Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
# - mean estimated as center of fit window - set window accordingly
# - gaussian amp and expo shift estimated based on counts at left edge
# - expo slope determined using fit window boundaries
nentries = len(points)
mean = lower + (upper - lower)/2.0
slope = 2*(np.log(counts[-1])-np.log(counts[0]))/(points[-1]-points[0])
pinit = [counts[0],mean,5.0,counts[0]*count_offset,slope]
#print('Initial parameters: amp = {0}, mean = {1}, sigma = {2}, amp2 = {3}'.format(pinit[0],pinit[1],pinit[2],pinit[3]))
# Currently using leastsq fit from scipy
# - see scipy documentation for more information
errfunc = lambda p, x, y: gaus_plus_exp(x,p)-y
pfit,pcov,infodict,errmsg,success = \
optimize.leastsq(errfunc, pinit, args=(points,counts), \
full_output=1, epsfcn=0.0001)
#print('after parameters: amp= {0}, mean ={1}, sigma = {2}, amp2 = {3}'.format(pfit[0],pfit[1],pfit[2],pfit[3]))
# Calculate fit parameter uncertainties using the covariance matrix
# and the (fit - data) variance
if (len(counts) > len(pinit)) and pcov is not None:
s_sq = (errfunc(pfit, points, counts)**2).sum()/(len(counts)-len(pinit))
pcov = pcov * s_sq
else:
pcov = 0
error = []
for i in range(len(pfit)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
pfit_leastsq = pfit
perr_leastsq = np.array(error)
return pfit_leastsq, perr_leastsq
def get_double_peaks(rows, number, n=1, lower_limit=480, upper_limit=600, make_plot = False):
'''
Applies double gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
Returns:
- list of means,sigmas,amps for second gaussian in fit
- that's the Bi peak, so this is hard coded to work for a specific case
- each entry in list includes the value and uncertainty
'''
entries = 12*n
days = (24/n)
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,12))
integrated = sum(array_lst)
#print integrated
fit_pars, fit_errs = double_peak_finder(integrated,lower_limit,upper_limit)
mean = [fit_pars[1],fit_errs[1]]
sigma = [fit_pars[2],fit_errs[2]]
amp = [fit_pars[0],fit_errs[0]]
if fit_pars[4] > fit_pars[1]:
mean = [fit_pars[4],fit_errs[4]]
sigma = [fit_pars[5],fit_errs[5]]
amp = [fit_pars[3],fit_errs[3]]
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
counter+=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,double_gaus_plus_exp(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means, sigmas, amps
def get_peaks(rows, number=1, n=1, lower_limit=480, upper_limit=600, make_plot = False,count_offset=100):
'''
    Applies gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
- count offset correction to fit parameters based on peak position
(peaks farther from the left edge of spectrum need bigger correction)
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
entries = 600*n
days = (144/n)
print('making {} plots for each day'.format(days))
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
try:
array_lst.append(make_array(j,12))
except:
print('Error processing {} as a list'.format(j))
pass
integrated = sum(array_lst)
#print integrated
            try:
                fit_pars,fit_errs = peak_finder(integrated,lower_limit,upper_limit,count_offset)
            except:
                print('Error fitting {}'.format(integrated))
                # keep the result lists aligned with get_times() by recording
                # NaNs instead of leaving fit_pars undefined or stale
                fit_pars = [np.nan]*5
                fit_errs = [np.nan]*5
means.append([fit_pars[1],fit_errs[1]])
sigmas.append([fit_pars[2],fit_errs[2]])
amps.append([fit_pars[0],fit_errs[0]])
counter +=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
#plt.ylim()
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,gaus_plus_exp(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means,sigmas,amps
def get_peaks2(rows, number=1, n=1, lower_limit=900, upper_limit=1020, make_plot = False,count_offset=100):
'''
This is for Tl-208
Applies gaussian + const fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
- count offset correction to fit parameters based on peak position
(peaks farther from the left edge of spectrum need bigger correction)
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
entries = 12*n
days = (24/n)
print('making {} plots for each day'.format(days))
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
try:
array_lst.append(make_array(j,12))
except:
print('Error processing {} as a list'.format(j))
pass
integrated = sum(array_lst)
#print integrated
fit_pars,fit_errs = peak_finder(integrated,lower_limit,upper_limit,count_offset)
means.append([fit_pars[1],fit_errs[1]])
sigmas.append([fit_pars[2],fit_errs[2]])
amps.append([fit_pars[0],fit_errs[0]])
counter +=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
#plt.ylim()
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,gaus_plus_const(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means,sigmas,amps
#--------------------------------------------------------------------------#
# Methods for performing calculations on fit results
#--------------------------------------------------------------------------#
def get_mean(values):
'''
    Calculate the mean and sigma for some input array of data.
    Entries <= 1 (failed fits) are excluded from the sums, though the
    normalization still uses the full array length.
    '''
mean = 0
var = 0
for i in range(len(values)):
if values[i] > 1:
mean += values[i]
mean = mean/len(values)
for i in range(len(values)):
if values[i] > 1:
var += (mean - values[i])**2
var = np.sqrt(var/len(values))
return mean, var
def get_peak_counts(means,sigmas,amps):
'''
Calculate the area under a gaussian curve (estimate of counts in that peak)
Arguments:
- list of guassian means
- list of guassian widths
- list of gaussian amplitudes
Returns:
- list of counts from resulting gaussian integrations
'''
counts = []
for i in range(len(means)):
count,err = quad(gaus,0,1000,args=(amps[i],means[i],sigmas[i]))
counts.append(count)
return counts
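# Cross-check for the integration above: a gaussian's area has the closed
# form A = amp * sigma * sqrt(2*pi), so quad() should agree with it for any
# peak well inside the [0, 1000] window (illustrative values; the lbound
# penalty terms in gaus() vanish for positive parameters):
#
#     amp, mu, sig = 100.0, 500.0, 10.0
#     numeric, _ = quad(gaus, 0, 1000, args=(amp, mu, sig), points=[mu])
#     analytic = amp * sig * np.sqrt(2 * np.pi)
#     assert abs(numeric - analytic) / analytic < 1e-3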
def get_calibration(rows,ndays):
'''
Specific method for getting the data calibration assuming Bi-214 is part
of a double peak and fitting data integrated over a day not an hour
Returns a single calibration constant
'''
Bi_peaks, Bi_sigmas, Bi_amps = get_double_peaks(rows,ndays,24,240,320,True)
    K_peaks, K_sigmas, K_amps = get_peaks(rows,ndays,24,440,640)
    Tl_peaks, Tl_sigmas, Tl_amps = get_peaks2(rows,ndays,24,900,1020)
print(Bi_peaks)
print(K_peaks)
print(Tl_peaks)
    # each entry is [value, err]; feed only the values to get_mean
    Bi_mean, Bi_var = get_mean(np.asarray([p[0] for p in Bi_peaks]))
    K_mean, K_var = get_mean(np.asarray([p[0] for p in K_peaks]))
    Tl_mean, Tl_var = get_mean(np.asarray([p[0] for p in Tl_peaks]))
    print('bismuth peak channel = {}, potassium peak channel = {}, thallium peak channel = {}'.format(Bi_mean,K_mean,Tl_mean))
calibration_constant = (1460-609)/(K_mean - Bi_mean)
print('keV/channel = {}'.format(calibration_constant))
return calibration_constant
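# Worked example (illustrative numbers): if the K-40 1460 keV line fits at
# channel ~576 and the Bi-214 609 keV line at channel ~243, then
#   keV/channel = (1460 - 609) / (576 - 243) = 851 / 333 ~ 2.56
# i.e. each channel spans roughly 2.6 keV at that gain setting.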
def spectrum_peaks_plotter(rows):
'''
    This method integrates the input data from the CSV file and makes an
    estimated plot for each isotope peak, based on the channel range and the
    corresponding counts of each isotope.
'''
n=4
entries = 12*n
integration = rows[1:entries+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,160,320))
integrated = sum(array_lst)
Channels = range(0,len(integrated))
Counts = integrated
plt.plot(Channels, Counts)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('Bi-Peaks Identifier ')
plt.show()
integration_1 = rows[1:entries+1]
array_lst_1 = []
for i in integration_1:
array_lst_1.append(make_array(i,540,640))
integrated_1 = sum(array_lst_1)
Channels_1 = range(0,len(integrated_1))
Counts_1 = integrated_1
plt.plot(Channels_1, Counts_1)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('K-Peak Identifier')
plt.show()
integration_2 = rows[1:entries+1]
array_lst_2 = []
for j in integration_2:
array_lst_2.append(make_array(j,800,1022))
integrated_2 = sum(array_lst_2)
Channels_2 = range(0,len(integrated_2))
Counts_2 = integrated_2
plt.plot(Channels_2, Counts_2)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('Tl-Peak Identifier')
plt.show()
if __name__ == '__main__':
# import data from PERM station for all isotopes
#PATH1 = '/Users/alihanks/Google Drive/NQUAKE_analysis/PERM/PERM_data/lbnl_sensor_60.csv'
PATH1 = '/Users/alihanks/Google Drive/NQUAKE_analysis/PERM/PERM_data/lbnl_sensor_60_aug.csv'
with open(PATH1) as f:
reader = csv.reader(f)
rows = [r for r in reader]
date = []
cpm = []
cpm_error = []
line = 0
#---------------------------------------------------------------------#
# Get fit results for ndays integrating over nhours for each fit
#---------------------------------------------------------------------#
ndays = 7
nhours = 2
times = get_times(rows,ndays,nhours)
K_peaks, K_sigmas, K_amps = get_peaks(rows,ndays,nhours,540,640)
    #Bi_peaks,Bi_sigmas,Bi_amps = get_double_peaks(rows,ndays,nhours,160,320)  # superseded by the single-peak fit below
Bi_peaks,Bi_sigmas,Bi_amps = get_peaks(rows,ndays,nhours,164,324,False,1)
Tl_peaks, Tl_sigmas, Tl_amps = get_peaks2(rows,ndays,nhours,900,1000)
#-------------------------------------------------------------------------#
# Break apart mean,sigma,amp values and uncertainties
#-------------------------------------------------------------------------#
K_ch = np.asarray([i[0] for i in K_peaks])
K_ch_errs = np.asarray([i[1] for i in K_peaks])
K_sig = [i[0] for i in K_sigmas]
K_A = [i[0] for i in K_amps]
Bi_ch = np.asarray([i[0] for i in Bi_peaks])
Bi_ch_errs = np.asarray([i[1] for i in Bi_peaks])
Bi_sig = [i[0] for i in Bi_sigmas]
Bi_A = [i[0] for i in Bi_amps]
Tl_ch = np.asarray([i[0] for i in Tl_peaks])
Tl_ch_errs = np.asarray([i[1] for i in Tl_peaks])
Tl_sig = [i[0] for i in Tl_sigmas]
Tl_A = [i[0] for i in Tl_amps]
K_ch_ave = np.mean(K_ch)
K_ch_var = np.sqrt(np.var(K_ch))
B_ch_ave = np.mean(Bi_ch)
B_ch_var = np.sqrt(np.var(Bi_ch))
Tl_ch_ave = np.mean(Tl_ch)
Tl_ch_var = np.sqrt(np.var(Tl_ch))
print('K-40 <channel> = {} +/- {}'.format(K_ch_ave,K_ch_var))
print('Bi-214 <channel> = {} +/- {}'.format(B_ch_ave,B_ch_var))
print('Tl-208 <channel> = {} +/- {}'.format(Tl_ch_ave,Tl_ch_var))
for i in range(len(K_ch)):
if abs(K_ch[i]-K_ch_ave) > 3*K_ch_var:
print('Bad K-40 fit: peak channel = {}'.format(K_ch[i]))
if abs(Bi_ch[i]-B_ch_ave) > 3*B_ch_var:
print('Bad Bi-214 fit: peak channel = {}'.format(Bi_ch[i]))
#-------------------------------------------------------------------------#
# Get arrays of counts inside K-40, Bi-214,and Tl-208 peaks using fit results
#-------------------------------------------------------------------------#
K_counts = get_peak_counts(K_ch,K_sig,K_A)
Bi_counts = get_peak_counts(Bi_ch,Bi_sig,Bi_A)
Tl_counts= get_peak_counts(Tl_ch,Tl_sig,Tl_A)
#-------------------------------------------------------------------------#
# Get array of calibration constants from resulting K-40 and Bi-214 means
#-------------------------------------------------------------------------#
calibs = (1460-609)/(K_ch - Bi_ch)
calib_err = (1460-609)/(K_ch - Bi_ch)**2 \
*np.sqrt(Bi_ch_errs**2 + K_ch_errs**2)
#-------------------------------------------------------------------------#
# Plots of everything we are interested in!
#-------------------------------------------------------------------------#
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('K-40 counts vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
plt.ylim(0,1600)
ax.plot(times,K_counts, 'ro')
ax.errorbar(times,K_counts,yerr=np.sqrt(K_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('Bi-214 counts vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
ax.plot(times,Bi_counts, 'ro')
ax.errorbar(times,Bi_counts,yerr=np.sqrt(Bi_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('1460 Center channel vs Time')
plt.xlabel('Time')
plt.ylabel('1460 center channel')
ax.plot(times,K_ch, 'ro')
ax.errorbar(times,K_ch,yerr=K_ch_errs,fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig,ax=plt.subplots()
fig.patch.set_facecolor('white')
plt.title('Tl-208 count vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
plt.ylim(0,1000)
ax.plot(times,Tl_counts, 'ro')
ax.errorbar(times,Tl_counts,yerr=np.sqrt(Tl_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('609 Center channel vs Time')
plt.xlabel('Time')
plt.ylabel('609 center channel')
plt.ylim(B_ch_ave-10*B_ch_var,B_ch_ave+10*B_ch_var)
ax.plot(times,Bi_ch, 'ro')
ax.errorbar(times,Bi_ch,yerr=Bi_ch_errs,fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('keV/channel vs Time')
plt.xlabel('Time')
plt.ylabel('keV/channel')
#plt.ylim(4.9,5.15)
#plt.ylim(4.6,6.0)
ax.plot(times,calibs, 'bo')
ax.errorbar(times,calibs,yerr=calib_err,fmt='bo',ecolor='b')
fig.autofmt_xdate()
# Finally: interested in how much the count rates vary for the two isotopes
Bi_mean, Bi_var = get_mean(np.asarray(Bi_counts))
print('Bi-214 <N> = {} +/- {}'.format(Bi_mean,Bi_var))
K_mean, K_var = get_mean(np.asarray(K_counts))
print('K-40 <N> = {} +/- {}'.format(K_mean,K_var))
Tl_mean, Tl_var = get_mean(np.asarray(Tl_counts))
print('Tl-208 <N> = {} +/- {}'.format(Tl_mean,Tl_var))
#Plotting the the three Isotopes on same plot
fig=plt.figure()
#plt.plot_date(times,K_counts,'bo',label='k-40')
plt.errorbar(times,K_counts,yerr=np.sqrt(K_counts),fmt='bo',ecolor='b',label='K-40')
#plt.plot_date(times,Bi_counts,'ro',label='Bi-214')
plt.errorbar(times,Bi_counts,yerr=np.sqrt(Bi_counts),fmt='ro',ecolor='r',label='Bi-214')
#plt.plot_date(times,Tl_counts,'ko',label='Tl-208')
plt.errorbar(times,Tl_counts,yerr=np.sqrt(Tl_counts),fmt='ko',ecolor='y',label='Tl-208')
plt.ylim(0,1800)
plt.xlabel('Time')
plt.ylabel('counts')
plt.title('K-40,Bi-214,Tl-208 counts vs Time')
#plt.legend(bbox_to_anchor=(1.2, 0.05))
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.02),
ncol=3, fancybox=True, shadow=False,numpoints=1)
fig.autofmt_xdate()
# Show all plots - add autosave?
plt.show()
    spectrum_peaks_plotter(rows)  # returns None; called for its plots
|
bearing/dosenet-analysis
|
D3S_analysis/spectra_fitter.py
|
Python
|
mit
| 25,763
|
[
"Gaussian"
] |
12587b4d660d30bcad05c27d3461945a933d9aeb715c016b43e76851b005dd4d
|
#!BPY
"""
Name: 'ASCII Scene (.ase) v0.16'
Blender: 249
Group: 'Import'
Tooltip: 'ASCII Scene import (*.ase)'
"""
__author__ = "Goofos & Plagman"
__version__ = "0.16"
# goofos at epruegel.de
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
import string, time, sys as osSys
import Blender
from Blender import Draw, Mesh, Window, Object, Scene, NMesh, Key, Ipo, IpoCurve
#import meshtools
def read_main(filename):
global counts
counts = {'verts': 0, 'tris': 0}
start = time.clock()
file = open(filename, "r")
print_boxed("----------------start-----------------")
    print 'Import Path: ', filename
editmode = Window.EditMode() # are we in edit mode? If so ...
if editmode: Window.EditMode(0) # leave edit mode before getting the mesh
lines= file.readlines()
read_file(file, lines)
Blender.Window.DrawProgressBar(1.0, '') # clear progressbar
file.close()
print "----------------end-----------------"
end = time.clock()
seconds = " in %.2f %s" % (end-start, "seconds")
totals = "Verts: %i Tris: %i " % (counts['verts'], counts['tris'])
print_boxed(totals)
message = "Successfully imported " + Blender.sys.basename(filename) + seconds
#meshtools.print_boxed(message)
print_boxed(message)
def print_boxed(text): #Copy/Paste from meshtools, only to remove the beep :)
lines = text.splitlines()
maxlinelen = max(map(len, lines))
if osSys.platform[:3] == "win":
print chr(218)+chr(196) + chr(196)*maxlinelen + chr(196)+chr(191)
for line in lines:
print chr(179) + ' ' + line.ljust(maxlinelen) + ' ' + chr(179)
print chr(192)+chr(196) + chr(196)*maxlinelen + chr(196)+chr(217)
else:
print '+-' + '-'*maxlinelen + '-+'
for line in lines: print '| ' + line.ljust(maxlinelen) + ' |'
print '+-' + '-'*maxlinelen + '-+'
#print '\a\r', # beep when done
class ase_obj:
def __init__(self):
self.name = 'Name'
self.objType = None
self.row0x = None
self.row0y = None
self.row0z = None
self.row1x = None
self.row1y = None
self.row1z = None
self.row2x = None
self.row2y = None
self.row2z = None
self.row3x = None
self.row3y = None
self.row3z = None
self.parent = None
self.obj = None
self.objName = 'Name'
class ase_mesh:
def __init__(self):
self.name = ''
self.vCount = 0
self.fCount = 0
self.frames = []
self.verts = []
self.faces = []
self.animated = 0
self.frameCount = -1
class mesh_vert:
def __init__(self):
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.u = 0.0
self.v = 0.0
self.nx = 0.0
self.ny = 0.0
self.nz = 0.0
self.origi = 0
def make_tuple(self):
return (self.x, self.y, self.z, self.u, self.v, self.nx, self.ny, self.nz)
class mesh_face:
def __init__(self):
self.v1 = mesh_vert()
self.v2 = mesh_vert()
self.v3 = mesh_vert()
self.i1 = 0
self.i2 = 0
self.i3 = 0
def read_file(file, lines):
objects = []
objIdx = 0
objCheck = -1 #needed to skip helper objects
PBidx = 0.0
lineCount = float(len(lines))
processed_indices = []
curFaceID = 0
faceVertID = 0
print 'Read file'
Blender.Window.DrawProgressBar(0.0, "Read File...")
for line in lines:
words = string.split(line)
if (PBidx % 10000) == 0.0:
Blender.Window.DrawProgressBar(PBidx / lineCount, "Read File...")
if not words:
continue
elif objIdx > 0 and me.animated == 1:
            # skip everything else; animation frames are handled further below
            pass
elif words[0] == '*GEOMOBJECT':
objCheck = 0
newObj = ase_obj()
objects.append(newObj)
obj = objects[objIdx]
objIdx += 1
obj.objType = 'Mesh'
obj.obj = ase_mesh()
me = obj.obj
elif words[0] == '*NODE_NAME' and objCheck != -1:
if objCheck == 0:
obj.name = words[1]
objCheck = 1
elif objCheck == 1:
obj.objName = words[1]
elif words[0] == '*TM_ROW0' and objCheck != -1:
obj.row0x = float(words[1])
obj.row0y = float(words[2])
obj.row0z = float(words[3])
elif words[0] == '*TM_ROW1' and objCheck != -1:
obj.row1x = float(words[1])
obj.row1y = float(words[2])
obj.row1z = float(words[3])
elif words[0] == '*TM_ROW2' and objCheck != -1:
obj.row2x = float(words[1])
obj.row2y = float(words[2])
obj.row2z = float(words[3])
elif words[0] == '*TM_ROW3' and objCheck != -1:
obj.row3x = float(words[1])
obj.row3y = float(words[2])
obj.row3z = float(words[3])
objCheck = -1
elif words[0] == '*MESH_NUMVERTEX':
me.vCount = int(words[1])
for i in range(me.vCount):
me.verts.append(mesh_vert())
elif words[0] == '*MESH_NUMFACES':
me.fCount = int(words[1])
for i in range(me.fCount):
me.faces.append(mesh_face())
elif words[0] == '*MESH_VERTEX':
i = int(words[1])
me.verts[i].x = float(words[2]);
me.verts[i].y = float(words[3]);
me.verts[i].z = float(words[4]);
elif words[0] == '*MESH_FACE':
i = int(words[1].rstrip(":")) # looks like "13:"
v1 = int(words[3]);
v2 = int(words[5]);
v3 = int(words[7]);
me.faces[i].v1.x = me.verts[v1].x;
me.faces[i].v1.y = me.verts[v1].y;
me.faces[i].v1.z = me.verts[v1].z;
me.faces[i].v1.origi = v1
me.faces[i].v2.x = me.verts[v2].x;
me.faces[i].v2.y = me.verts[v2].y;
me.faces[i].v2.z = me.verts[v2].z;
me.faces[i].v2.origi = v2
me.faces[i].v3.x = me.verts[v3].x;
me.faces[i].v3.y = me.verts[v3].y;
me.faces[i].v3.z = me.verts[v3].z;
me.faces[i].v3.origi = v3
elif words[0] == '*MESH_NUMTVERTEX':
del me.verts[:]
uvCount = int(words[1])
for i in range(uvCount):
me.verts.append(mesh_vert())
elif words[0] == '*MESH_TVERT':
i = int(words[1])
me.verts[i].u = float(words[2]);
me.verts[i].v = float(words[3]);
elif words[0] == '*MESH_TFACE':
i = int(words[1])
uv1 = int(words[2]);
uv2 = int(words[3]);
uv3 = int(words[4]);
me.faces[i].v1.u = me.verts[uv1].u;
me.faces[i].v1.v = me.verts[uv1].v;
me.faces[i].v2.u = me.verts[uv2].u;
me.faces[i].v2.v = me.verts[uv2].v;
me.faces[i].v3.u = me.verts[uv3].u;
me.faces[i].v3.v = me.verts[uv3].v;
elif words[0] == '*MESH_FACENORMAL':
curFaceID = int(words[1]) # global, vertexnormal needs this
faceVertID = 0 # same
elif words[0] == '*MESH_VERTEXNORMAL':
nx = float(words[2])
ny = float(words[3])
nz = float(words[4])
if (faceVertID == 0):
me.faces[curFaceID].v1.nx = nx;
me.faces[curFaceID].v1.ny = ny;
me.faces[curFaceID].v1.nz = nz;
elif (faceVertID == 1):
me.faces[curFaceID].v2.nx = nx;
me.faces[curFaceID].v2.ny = ny;
me.faces[curFaceID].v2.nz = nz;
elif (faceVertID == 2):
me.faces[curFaceID].v3.nx = nx;
me.faces[curFaceID].v3.ny = ny;
me.faces[curFaceID].v3.nz = nz;
faceVertID = faceVertID + 1;
elif words[0] == '*MESH_ANIMATION':
me.animated = 1
# now the loop for animation frames
if objIdx > 0 and me.animated == 1:
if words[0] == '*MESH_VERTEX_LIST':
me.frameCount += 1
me.frames.append([])
elif words[0] == '*MESH_VERTEX':
me.frames[me.frameCount].append(mesh_vert())
i = int(words[1])
me.frames[me.frameCount][i].x = float(words[2]);
me.frames[me.frameCount][i].y = float(words[3]);
me.frames[me.frameCount][i].z = float(words[4]);
PBidx += 1.0
spawn_main(objects)
Blender.Redraw()
def spawn_main(objects):
PBidx = 0.0
objCount = float(len(objects))
print 'Import Objects'
Blender.Window.DrawProgressBar(0.0, "Importing Objects...")
for obj in objects:
Blender.Window.DrawProgressBar(PBidx / objCount, "Importing Objects...")
if obj.objType == 'Mesh':
spawn_mesh(obj)
PBidx += 1.0
import random
def spawn_mesh(obj):
objMe = obj.obj
#normal_flag = 1
row0 = obj.row0x, obj.row0y, obj.row0z
row1 = obj.row1x, obj.row1y, obj.row1z
row2 = obj.row2x, obj.row2y, obj.row2z
row3 = obj.row3x, obj.row3y, obj.row3z
newMatrix = Blender.Mathutils.Matrix(row0, row1, row2, row3)
newMatrix.resize4x4()
newObj = Blender.Object.New(obj.objType, obj.name)
newObj.setMatrix(newMatrix)
Blender.Scene.getCurrent().link(newObj)
newMesh = Blender.Mesh.New(obj.objName)
newMesh.getFromObject(newObj.name)
newMesh.vertexUV = 1
newObj.link(newMesh)
del objMe.verts[:]
objMe.vCount = 0
vertDict = {}
#for face in objMe.faces:
#objMe.verts.append(face.v1)
#objMe.verts.append(face.v2)
#objMe.verts.append(face.v3)
#face.i1 = objMe.vCount
#objMe.vCount = objMe.vCount + 1
#face.i2 = objMe.vCount
#objMe.vCount = objMe.vCount + 1
#face.i3 = objMe.vCount
#objMe.vCount = objMe.vCount + 1
for face in objMe.faces:
if not face.v1.make_tuple() in vertDict:
vertDict[face.v1.make_tuple()] = objMe.vCount
objMe.verts.append(face.v1)
objMe.vCount = objMe.vCount + 1
if not face.v2.make_tuple() in vertDict:
vertDict[face.v2.make_tuple()] = objMe.vCount
objMe.verts.append(face.v2)
objMe.vCount = objMe.vCount + 1
if not face.v3.make_tuple() in vertDict:
vertDict[face.v3.make_tuple()] = objMe.vCount
objMe.verts.append(face.v3)
objMe.vCount = objMe.vCount + 1
face.i1 = vertDict[face.v1.make_tuple()]
face.i2 = vertDict[face.v2.make_tuple()]
face.i3 = vertDict[face.v3.make_tuple()]
# Verts
for i in range(objMe.vCount):
xyz = Blender.Mathutils.Vector(objMe.verts[i].x, objMe.verts[i].y, objMe.verts[i].z)
newMesh.verts.extend(xyz)
for i in range(objMe.vCount):
xyz = Blender.Mathutils.Vector(objMe.verts[i].x, objMe.verts[i].y, objMe.verts[i].z)
uv = Blender.Mathutils.Vector(objMe.verts[i].u, objMe.verts[i].v)
norm = Blender.Mathutils.Vector(objMe.verts[i].nx, objMe.verts[i].ny, objMe.verts[i].nz)
newMesh.verts[i].co = xyz;
newMesh.verts[i].uvco = uv;
newMesh.verts[i].no = norm;
if objMe.animated:
objMe.frameCount -= 1 # do we always get an extra frame at the end?
for frame in objMe.frames:
for i in range(objMe.vCount):
xyz = Blender.Mathutils.Vector(frame[objMe.verts[i].origi].x, frame[objMe.verts[i].origi].y, frame[objMe.verts[i].origi].z)
newMesh.verts[i].co = xyz;
newObj.insertShapeKey()
for key in Key.Get() :
key.ipo = Ipo.New('Key', "bleh" + "_ipo")
index = 1
for curveName in key.ipo.curveConsts :
# print curveName
key.ipo.addCurve(curveName)
key.ipo[curveName].interpolation = IpoCurve.InterpTypes.CONST
key.ipo[curveName].addBezier((0, 0))
key.ipo[curveName].addBezier((index, 1))
key.ipo[curveName].addBezier((index + 1, 0))
index+=1
# Faces
for i in range(objMe.fCount):
face = [objMe.faces[i].i1, objMe.faces[i].i2, objMe.faces[i].i3]
newMesh.faces.extend(face)
# UV
#if guiTable['UV'] == 1 and objMe.hasFUV == 1:
#newMesh.faceUV = 1
#for f in objMe.uvFaces:
#uv1 = Blender.Mathutils.Vector(float(objMe.uvVerts[f.uv1].u), float(objMe.uvVerts[f.uv1].v))
#uv2 = Blender.Mathutils.Vector(float(objMe.uvVerts[f.uv2].u), float(objMe.uvVerts[f.uv2].v))
#uv3 = Blender.Mathutils.Vector(float(objMe.uvVerts[f.uv3].u), float(objMe.uvVerts[f.uv3].v))
#newMesh.faces[f.index].uv = [uv1, uv2, uv3]
## normals
#vertices = [coords for n, coords in sorted(objMe.normals)]
#random.seed()
#i = 0
#for v in newMesh.verts:
#no = Blender.Mathutils.Vector(vertices[i][0], vertices[i][1], vertices[i][2])
#v.no = no
#print 'vertice ', i, 'normal : ', v.no
##v.no[0] = vertices[i][0]
##v.no[1] = vertices[i][1]
##v.no[2] = vertices[i][2]
#i = i + 1
newMesh.transform((newObj.getMatrix('worldspace').invert()), 1)
Blender.Set("curframe", objMe.frameCount + 1)
counts['verts'] += objMe.vCount
counts['tris'] += objMe.fCount
print 'Imported Mesh-Object: ', obj.name
def read_ui(filename):
Window.WaitCursor(1)
read_main(filename)
Window.WaitCursor(0)
Blender.Window.FileSelector(read_ui, "Import ASE")
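# The vertex welding done in spawn_mesh() is worth a standalone sketch: each
# (position, uv, normal) tuple serves as a dict key, so per-face vertices that
# are bitwise identical collapse to a single mesh vertex (illustrative data,
# independent of the Blender API):
#
#     verts = [(0, 0, 0), (1, 0, 0), (0, 0, 0)]  # third duplicates the first
#     index_of = {}
#     welded = []
#     for v in verts:
#         if v not in index_of:
#             index_of[v] = len(welded)
#             welded.append(v)
#     print [index_of[v] for v in verts]  # -> [0, 1, 0]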
|
masterfeizz/EDuke3D
|
build/src/util/ase_import.py
|
Python
|
gpl-2.0
| 15,337
|
[
"ASE"
] |
c29e34759baf6c30e0e87501e92cf594c354fd9423c0ffbf68ffe5dedac077ec
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import unittest
import os
import warnings
import numpy as np
from pymatgen.io.cif import CifParser, CifWriter, CifBlock
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Element, Specie, Lattice, Structure, Composition, DummySpecie
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.testing import PymatgenTest
from pymatgen.electronic_structure.core import Magmom
try:
import pybtex
except ImportError:
pybtex = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CifBlockTest(unittest.TestCase):
def test_to_string(self):
with open(os.path.join(test_dir, 'Graphite.cif')) as f:
s = f.read()
c = CifBlock.from_string(s)
cif_str_2 = str(CifBlock.from_string(str(c)))
cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"),
cif_str_2.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
self.assertEqual(l2.strip(), l3.strip())
def test_double_quotes_and_underscore_data(self):
cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
self.assertEqual(cb["_thing"], "_annoying_data")
self.assertEqual(str(cb), cif_str.replace('"', "'"))
def test_double_quoted_data(self):
cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " '_annoying_data'")
self.assertEqual(cb["_other"], ' "_more_annoying_data"')
self.assertEqual(cb["_more"], ' "even more" ')
def test_nested_fake_multiline_quotes(self):
cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " long quotes ; still in the quote"
" ; actually going to end now")
def test_long_loop(self):
data = {'_stuff1': ['A' * 30] * 2,
'_stuff2': ['B' * 30] * 2,
'_stuff3': ['C' * 30] * 2}
loops = [['_stuff1', '_stuff2', '_stuff3']]
cif_str = """data_test
loop_
_stuff1
_stuff2
_stuff3
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
self.assertEqual(str(CifBlock(data, loops, 'test')), cif_str)
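    # A minimal sketch of the round-trip these tests rely on (using only the
    # CifBlock API exercised above): build a block from data/loops, serialize
    # it, and re-parse the string back into an equivalent block.
    #
    #     blk = CifBlock({'_cell_length_a': '2.47'}, [], 'demo')
    #     assert CifBlock.from_string(str(blk))['_cell_length_a'] == '2.47'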
class CifIOTest(PymatgenTest):
def test_CifParser(self):
parser = CifParser(os.path.join(test_dir, 'LiFePO4.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li4 Fe4 P4 O16",
"Incorrectly parsed cif.")
parser = CifParser(os.path.join(test_dir, 'V2O3.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "V4 O6")
parser = CifParser(os.path.join(test_dir, 'Li2O.cif'))
prim = parser.get_structures(True)[0]
self.assertEqual(prim.formula, "Li2 O1")
conv = parser.get_structures(False)[0]
self.assertEqual(conv.formula, "Li8 O4")
#test for disordered structures
parser = CifParser(os.path.join(test_dir, 'Li10GeP2S12.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24",
"Incorrectly parsed cif.")
cif_str = """#\#CIF1.1
##########################################################################
# Crystallographic Information Format file
# Produced by PyCifRW module
#
# This is a CIF file. CIF has been adopted by the International
# Union of Crystallography as the standard for data archiving and
# transmission.
#
# For information on this file format, follow the CIF links at
# http://www.iucr.org
##########################################################################
data_FePO4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 10.4117668699
_cell_length_b 6.06717187997
_cell_length_c 4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta 92.0
_cell_angle_gamma 93.0
_chemical_name_systematic 'Generated by pymatgen'
_symmetry_Int_Tables_number 1
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_attached_hydrogens
_atom_site_B_iso_or_equiv
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 0 . 1
Fe JJ2 1 0.281272 0.250000 0.974867 0 . 1
# there's a deliberate typo in the label above ("JJ2"); the parser
# should read the symbol from the _atom_site_type_symbol column instead
Fe Fe3 1 0.718728 0.750000 0.025133 0 . 1
Fe Fe4 1 0.781272 0.250000 0.525133 0 . 1
P P5 1 0.094613 0.250000 0.418243 0 . 1
P P6 1 0.405387 0.750000 0.918243 0 . 1
P P7 1 0.594613 0.250000 0.081757 0 . 1
P P8 1 0.905387 0.750000 0.581757 0 . 1
O O9 1 0.043372 0.750000 0.707138 0 . 1
O O10 1 0.096642 0.250000 0.741320 0 . 1
O O11 1 0.165710 0.046072 0.285384 0 . 1
O O12 1 0.165710 0.453928 0.285384 0 . 1
O O13 1 0.334290 0.546072 0.785384 0 . 1
O O14 1 0.334290 0.953928 0.785384 0 . 1
O O15 1 0.403358 0.750000 0.241320 0 . 1
O O16 1 0.456628 0.250000 0.207138 0 . 1
O O17 1 0.543372 0.750000 0.792862 0 . 1
O O18 1 0.596642 0.250000 0.758680 0 . 1
O O19 1 0.665710 0.046072 0.214616 0 . 1
O O20 1 0.665710 0.453928 0.214616 0 . 1
O O21 1 0.834290 0.546072 0.714616 0 . 1
O O22 1 0.834290 0.953928 0.714616 0 . 1
O O23 1 0.903358 0.750000 0.258680 0 . 1
O O24 1 0.956628 0.250000 0.292862 0 . 1
"""
parser = CifParser.from_string(cif_str)
struct = parser.get_structures(primitive=False)[0]
self.assertEqual(struct.formula, "Fe4 P4 O16")
self.assertAlmostEqual(struct.lattice.a, 10.4117668699)
self.assertAlmostEqual(struct.lattice.b, 6.06717187997)
self.assertAlmostEqual(struct.lattice.c, 4.75948953998)
self.assertAlmostEqual(struct.lattice.alpha, 91)
self.assertAlmostEqual(struct.lattice.beta, 92)
self.assertAlmostEqual(struct.lattice.gamma, 93)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(os.path.join(test_dir, 'srycoo.cif'))
self.assertEqual(parser.get_structures()[0].formula,
"Sr5.6 Y2.4 Co8 O21")
# Test a CIF whose symmetry operation is written with decimal
# fractions (e.g. '0.5+x' rather than 'x+1/2'). This should parse as
# two atoms in the conventional cell if handled correctly, one if not.
parser = CifParser(os.path.join(test_dir, "Fe.cif"))
self.assertEqual(len(parser.get_structures(primitive=False)[0]), 2)
self.assertFalse(parser.has_errors)
def test_site_symbol_preference(self):
parser = CifParser(os.path.join(test_dir, 'site_type_symbol_test.cif'))
self.assertEqual(parser.get_structures()[0].formula, "Ge0.4 Sb0.4 Te1")
def test_implicit_hydrogen(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(os.path.join(test_dir, 'Senegalite_implicit_hydrogen.cif'))
for s in parser.get_structures():
self.assertEqual(s.formula, "Al8 P4 O32")
self.assertEqual(sum(s.site_properties['implicit_hydrogens']), 20)
def test_CifParserSpringerPauling(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Below are 10 tests for CIFs from the Springer Materials/Pauling file DBs.
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(os.path.join(test_dir, 'PF_sd_1928405.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Er1 Mn3.888 Fe2.112 Sn6")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(os.path.join(test_dir, 'PF_sd_1011081.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Zr0.2 Nb0.8")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(os.path.join(test_dir, 'PF_sd_1615854.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Na2 Al2 Si6 O16")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(os.path.join(test_dir, 'PF_sd_1622133.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca0.184 Mg13.016 Fe2.8 Si16 O48")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(os.path.join(test_dir, 'PF_sd_1908491.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mn0.48 Zn0.52 Ga2 Se4")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(os.path.join(test_dir, 'PF_sd_1811457.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ba2 Mg0.6 Zr0.2 Ta1.2 O6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
# This CIF file contains the molecular species "NH3", which is
# parsed as "N" because the label is "N{x}" (x = 1, 2, ...) and the
# corresponding symbol is "NH3". Since the label and symbol are
# switched in CIFs from the Springer Materials/Pauling file DBs,
# CifParser parses the element as "N".
parser = CifParser(os.path.join(test_dir, 'PF_sd_1002871.cif'))
self.assertEqual(parser.get_structures(True)[0].formula, "Cu1 Br2 N6")
self.assertEqual(parser.get_structures(True)[1].formula, "Cu1 Br4 N6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
parser = CifParser(os.path.join(test_dir, 'PF_sd_1704003.cif'))
for s in parser.get_structures():
self.assertEqual(s.formula, "Rb4 Mn2 F12")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(os.path.join(test_dir, 'PF_sd_1500382.cif'))
for s in parser.get_structures():
self.assertEqual(s.formula, "Mg6 B2 O6 F1.764")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(os.path.join(test_dir, 'PF_sd_1601634.cif'))
for s in parser.get_structures():
self.assertEqual(s.formula, "Zn1.29 Fe0.69 As2 Pb1.02 O8")
def test_CifParserCod(self):
"""
Parsing problematic cif files from the COD database
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Symbol in capital letters
parser = CifParser(os.path.join(test_dir, 'Cod_2100513.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca4 Nb2.0 Al2 O12")
# Label in capital letters
parser = CifParser(os.path.join(test_dir, 'Cod_4115344.cif'))
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mo4 P2 H60 C60 I4 O4")
def test_parse_symbol(self):
"""
Test the _parse_symbol function with several potentially
problematic examples of symbols and labels.
"""
test_cases = {
"MgT": "Mg",
"MgT1": "Mg",
"H(46A)": "H",
"O(M)": "O",
"N(Am)": "N",
"H1N2a": "H",
"CO(1)": "Co",
"Wat1": "O",
"MgM2A": "Mg",
"CaX": "Ca",
"X1": "X",
"X": "X",
"OA1": "O",
"NaA2": "Na",
"O-H2": "O",
"OD2": "O",
"OW": "O",
"SiT": "Si",
"SiTet": "Si",
"Na-Int": "Na",
"CaD1": "Ca",
"KAm": "K",
"D+1": "D",
"D": "D",
"D1-": "D",
"D4": "D",
"D0": "D",
"NH": "N",
"NH2": "N",
"NH3": "N",
"SH": "S"
}
for e in Element:
name = e.name
test_cases[name] = name
if len(name) == 2:
test_cases[name.upper()] = name
test_cases[name.upper() + str(1)] = name
test_cases[name.upper() + "A"] = name
test_cases[name + str(1)] = name
test_cases[name + str(2)] = name
test_cases[name + str(3)] = name
test_cases[name + str(1) + "A"] = name
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": ""}
test_cases.update(special)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(os.path.join(test_dir, 'LiFePO4.cif'))
for sym, expected_symbol in test_cases.items():
self.assertEqual(parser._parse_symbol(sym), expected_symbol)
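# A rough sketch of the behaviour exercised above (hypothetical; *not*
# pymatgen's actual _parse_symbol implementation, which additionally
# handles water labels, deuterium, dummy species and unparsable species
# such as "OH"/"OH2"):
#     def naive_parse_symbol(label):
#         for n in (2, 1):
#             sym = label[:n].capitalize()
#             if Element.is_valid_symbol(sym):
#                 return sym
#         return None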
def test_CifWriter(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
writer = CifWriter(poscar.structure, symprec=0.01)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 4 0.218728 0.250000 0.525133 1
P P2 4 0.094613 0.750000 0.581757 1
O O3 8 0.165710 0.546072 0.714616 1
O O4 4 0.043372 0.250000 0.292862 1
O O5 4 0.096642 0.750000 0.258680 1"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_symmetrized(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
writer = CifWriter(poscar.structure, symprec=0.1)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 4 0.218728 0.250000 0.525133 1
P P2 4 0.094613 0.750000 0.581757 1
O O3 8 0.165710 0.546072 0.714616 1
O O4 4 0.043372 0.250000 0.292862 1
O O5 4 0.096642 0.750000 0.258680 1"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
ans = """# generated using pymatgen
data_LiFePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41037000
_cell_length_b 6.06577000
_cell_length_c 4.74480000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural LiFePO4
_chemical_formula_sum 'Li4 Fe4 P4 O16'
_cell_volume 299.619458734
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Li Li1 4 0.000000 0.000000 0.000000 1.0
Fe Fe2 4 0.218845 0.750000 0.474910 1.0
P P3 4 0.094445 0.250000 0.417920 1.0
O O4 8 0.165815 0.044060 0.286540 1.0
O O5 4 0.043155 0.750000 0.708460 1.0
O O6 4 0.096215 0.250000 0.741480 1.0
"""
s = Structure.from_file(os.path.join(test_dir, 'LiFePO4.cif'))
writer = CifWriter(s, symprec=0.1)
s2 = CifParser.from_string(str(writer)).get_structures()[0]
m = StructureMatcher()
self.assertTrue(m.fit(s, s2))
s = self.get_structure("Li2O")
writer = CifWriter(s, symprec=0.1)
ans = """# generated using pymatgen
data_Li2O
_symmetry_space_group_name_H-M Fm-3m
_cell_length_a 4.65884171
_cell_length_b 4.65884171
_cell_length_c 4.65884171
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 225
_chemical_formula_structural Li2O
_chemical_formula_sum 'Li8 O4'
_cell_volume 101.11925577
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 'z, y, -x'
4 '-z, -y, x'
5 '-x, y, -z'
6 'x, -y, z'
7 '-z, y, x'
8 'z, -y, -x'
9 'x, -y, -z'
10 '-x, y, z'
11 'z, -y, x'
12 '-z, y, -x'
13 '-x, -y, z'
14 'x, y, -z'
15 '-z, -y, -x'
16 'z, y, x'
17 'y, -z, -x'
18 '-y, z, x'
19 'y, x, -z'
20 '-y, -x, z'
21 'y, z, x'
22 '-y, -z, -x'
23 'y, -x, z'
24 '-y, x, -z'
25 '-y, z, -x'
26 'y, -z, x'
27 '-y, -x, -z'
28 'y, x, z'
29 '-y, -z, x'
30 'y, z, -x'
31 '-y, x, z'
32 'y, -x, -z'
33 '-z, x, -y'
34 'z, -x, y'
35 'x, z, -y'
36 '-x, -z, y'
37 'z, -x, -y'
38 '-z, x, y'
39 '-x, -z, -y'
40 'x, z, y'
41 'z, x, y'
42 '-z, -x, -y'
43 '-x, z, y'
44 'x, -z, -y'
45 '-z, -x, y'
46 'z, x, -y'
47 'x, -z, y'
48 '-x, z, -y'
49 'x+1/2, y+1/2, z'
50 '-x+1/2, -y+1/2, -z'
51 'z+1/2, y+1/2, -x'
52 '-z+1/2, -y+1/2, x'
53 '-x+1/2, y+1/2, -z'
54 'x+1/2, -y+1/2, z'
55 '-z+1/2, y+1/2, x'
56 'z+1/2, -y+1/2, -x'
57 'x+1/2, -y+1/2, -z'
58 '-x+1/2, y+1/2, z'
59 'z+1/2, -y+1/2, x'
60 '-z+1/2, y+1/2, -x'
61 '-x+1/2, -y+1/2, z'
62 'x+1/2, y+1/2, -z'
63 '-z+1/2, -y+1/2, -x'
64 'z+1/2, y+1/2, x'
65 'y+1/2, -z+1/2, -x'
66 '-y+1/2, z+1/2, x'
67 'y+1/2, x+1/2, -z'
68 '-y+1/2, -x+1/2, z'
69 'y+1/2, z+1/2, x'
70 '-y+1/2, -z+1/2, -x'
71 'y+1/2, -x+1/2, z'
72 '-y+1/2, x+1/2, -z'
73 '-y+1/2, z+1/2, -x'
74 'y+1/2, -z+1/2, x'
75 '-y+1/2, -x+1/2, -z'
76 'y+1/2, x+1/2, z'
77 '-y+1/2, -z+1/2, x'
78 'y+1/2, z+1/2, -x'
79 '-y+1/2, x+1/2, z'
80 'y+1/2, -x+1/2, -z'
81 '-z+1/2, x+1/2, -y'
82 'z+1/2, -x+1/2, y'
83 'x+1/2, z+1/2, -y'
84 '-x+1/2, -z+1/2, y'
85 'z+1/2, -x+1/2, -y'
86 '-z+1/2, x+1/2, y'
87 '-x+1/2, -z+1/2, -y'
88 'x+1/2, z+1/2, y'
89 'z+1/2, x+1/2, y'
90 '-z+1/2, -x+1/2, -y'
91 '-x+1/2, z+1/2, y'
92 'x+1/2, -z+1/2, -y'
93 '-z+1/2, -x+1/2, y'
94 'z+1/2, x+1/2, -y'
95 'x+1/2, -z+1/2, y'
96 '-x+1/2, z+1/2, -y'
97 'x+1/2, y, z+1/2'
98 '-x+1/2, -y, -z+1/2'
99 'z+1/2, y, -x+1/2'
100 '-z+1/2, -y, x+1/2'
101 '-x+1/2, y, -z+1/2'
102 'x+1/2, -y, z+1/2'
103 '-z+1/2, y, x+1/2'
104 'z+1/2, -y, -x+1/2'
105 'x+1/2, -y, -z+1/2'
106 '-x+1/2, y, z+1/2'
107 'z+1/2, -y, x+1/2'
108 '-z+1/2, y, -x+1/2'
109 '-x+1/2, -y, z+1/2'
110 'x+1/2, y, -z+1/2'
111 '-z+1/2, -y, -x+1/2'
112 'z+1/2, y, x+1/2'
113 'y+1/2, -z, -x+1/2'
114 '-y+1/2, z, x+1/2'
115 'y+1/2, x, -z+1/2'
116 '-y+1/2, -x, z+1/2'
117 'y+1/2, z, x+1/2'
118 '-y+1/2, -z, -x+1/2'
119 'y+1/2, -x, z+1/2'
120 '-y+1/2, x, -z+1/2'
121 '-y+1/2, z, -x+1/2'
122 'y+1/2, -z, x+1/2'
123 '-y+1/2, -x, -z+1/2'
124 'y+1/2, x, z+1/2'
125 '-y+1/2, -z, x+1/2'
126 'y+1/2, z, -x+1/2'
127 '-y+1/2, x, z+1/2'
128 'y+1/2, -x, -z+1/2'
129 '-z+1/2, x, -y+1/2'
130 'z+1/2, -x, y+1/2'
131 'x+1/2, z, -y+1/2'
132 '-x+1/2, -z, y+1/2'
133 'z+1/2, -x, -y+1/2'
134 '-z+1/2, x, y+1/2'
135 '-x+1/2, -z, -y+1/2'
136 'x+1/2, z, y+1/2'
137 'z+1/2, x, y+1/2'
138 '-z+1/2, -x, -y+1/2'
139 '-x+1/2, z, y+1/2'
140 'x+1/2, -z, -y+1/2'
141 '-z+1/2, -x, y+1/2'
142 'z+1/2, x, -y+1/2'
143 'x+1/2, -z, y+1/2'
144 '-x+1/2, z, -y+1/2'
145 'x, y+1/2, z+1/2'
146 '-x, -y+1/2, -z+1/2'
147 'z, y+1/2, -x+1/2'
148 '-z, -y+1/2, x+1/2'
149 '-x, y+1/2, -z+1/2'
150 'x, -y+1/2, z+1/2'
151 '-z, y+1/2, x+1/2'
152 'z, -y+1/2, -x+1/2'
153 'x, -y+1/2, -z+1/2'
154 '-x, y+1/2, z+1/2'
155 'z, -y+1/2, x+1/2'
156 '-z, y+1/2, -x+1/2'
157 '-x, -y+1/2, z+1/2'
158 'x, y+1/2, -z+1/2'
159 '-z, -y+1/2, -x+1/2'
160 'z, y+1/2, x+1/2'
161 'y, -z+1/2, -x+1/2'
162 '-y, z+1/2, x+1/2'
163 'y, x+1/2, -z+1/2'
164 '-y, -x+1/2, z+1/2'
165 'y, z+1/2, x+1/2'
166 '-y, -z+1/2, -x+1/2'
167 'y, -x+1/2, z+1/2'
168 '-y, x+1/2, -z+1/2'
169 '-y, z+1/2, -x+1/2'
170 'y, -z+1/2, x+1/2'
171 '-y, -x+1/2, -z+1/2'
172 'y, x+1/2, z+1/2'
173 '-y, -z+1/2, x+1/2'
174 'y, z+1/2, -x+1/2'
175 '-y, x+1/2, z+1/2'
176 'y, -x+1/2, -z+1/2'
177 '-z, x+1/2, -y+1/2'
178 'z, -x+1/2, y+1/2'
179 'x, z+1/2, -y+1/2'
180 '-x, -z+1/2, y+1/2'
181 'z, -x+1/2, -y+1/2'
182 '-z, x+1/2, y+1/2'
183 '-x, -z+1/2, -y+1/2'
184 'x, z+1/2, y+1/2'
185 'z, x+1/2, y+1/2'
186 '-z, -x+1/2, -y+1/2'
187 '-x, z+1/2, y+1/2'
188 'x, -z+1/2, -y+1/2'
189 '-z, -x+1/2, y+1/2'
190 'z, x+1/2, -y+1/2'
191 'x, -z+1/2, y+1/2'
192 '-x, z+1/2, -y+1/2'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Li+ 1.0
O2- -2.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Li+ Li1 8 0.250000 0.250000 0.250000 1
O2- O2 4 0.000000 0.000000 0.000000 1"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_disordered(self):
si = Element("Si")
n = Element("N")
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
lattice = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(lattice, [si, {si: 0.5, n: 0.5}], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_Si1.5N0.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N0.5
_chemical_formula_sum 'Si1.5 N0.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Si Si1 1 0.000000 0.000000 0.000000 1
Si Si2 1 0.750000 0.500000 0.750000 0.5
N N3 1 0.750000 0.500000 0.750000 0.5
"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_specie_cifwriter(self):
si4 = Specie("Si", 4)
si3 = Specie("Si", 3)
n = DummySpecie("X", -3)
coords = list()
coords.append(np.array([0.5, 0.5, 0.5]))
coords.append(np.array([0.75, 0.5, 0.75]))
coords.append(np.array([0, 0, 0]))
lattice = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(lattice, [n, {si3: 0.5, n: 0.5}, si4], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_X1.5Si1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural X1.5Si1.5
_chemical_formula_sum 'X1.5 Si1.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
X3- -3.0
Si3+ 3.0
Si4+ 4.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
X3- X1 1 0.500000 0.500000 0.500000 1
X3- X2 1 0.750000 0.500000 0.750000 0.5
Si3+ Si3 1 0.750000 0.500000 0.750000 0.5
Si4+ Si4 1 0.000000 0.000000 0.000000 1
"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
# test that mixed valence works properly
s2 = Structure.from_str(ans, "cif")
self.assertEqual(struct.composition, s2.composition)
def test_primes(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(os.path.join(test_dir, 'C26H16BeN2O2S2.cif'))
for s in parser.get_structures(False):
self.assertEqual(s.composition, 8 * Composition('C26H16BeN2O2S2'))
def test_missing_atom_site_type_with_oxistates(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(os.path.join(test_dir, 'P24Ru4H252C296S24N16.cif'))
c = Composition({'S0+': 24, 'Ru0+': 4, 'H0+': 252, 'C0+': 296,
'N0+': 16, 'P0+': 24})
for s in parser.get_structures(False):
self.assertEqual(s.composition, c)
def test_no_coords_or_species(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
string = """#generated using pymatgen
data_Si1.5N1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N1.5
_chemical_formula_sum 'Si1.5 N1.5'
_cell_volume 40.0447946443
_cell_formula_units_Z 0
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Si3+ 3.0
Si4+ 4.0
N3- -3.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
? ? ? ? ? ? ?
"""
parser = CifParser.from_string(string)
self.assertRaises(ValueError, parser.get_structures)
def test_get_lattice_from_lattice_type(self):
cif_structure = """#generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
_symmetry_cell_setting Orthorhombic
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 1
Fe Fe2 1 0.281272 0.250000 0.974867 1
Fe Fe3 1 0.718728 0.750000 0.025133 1
Fe Fe4 1 0.781272 0.250000 0.525133 1
P P5 1 0.094613 0.250000 0.418243 1
P P6 1 0.405387 0.750000 0.918243 1
P P7 1 0.594613 0.250000 0.081757 1
P P8 1 0.905387 0.750000 0.581757 1
O O9 1 0.043372 0.750000 0.707138 1
O O10 1 0.096642 0.250000 0.741320 1
O O11 1 0.165710 0.046072 0.285384 1
O O12 1 0.165710 0.453928 0.285384 1
O O13 1 0.334290 0.546072 0.785384 1
O O14 1 0.334290 0.953928 0.785384 1
O O15 1 0.403358 0.750000 0.241320 1
O O16 1 0.456628 0.250000 0.207138 1
O O17 1 0.543372 0.750000 0.792862 1
O O18 1 0.596642 0.250000 0.758680 1
O O19 1 0.665710 0.046072 0.214616 1
O O20 1 0.665710 0.453928 0.214616 1
O O21 1 0.834290 0.546072 0.714616 1
O O22 1 0.834290 0.953928 0.714616 1
O O23 1 0.903358 0.750000 0.258680 1
O O24 1 0.956628 0.250000 0.292862 1
"""
cp = CifParser.from_string(cif_structure)
s_test = cp.get_structures(False)[0]
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
s_ref = poscar.structure
sm = StructureMatcher(stol=0.05, ltol=0.01, angle_tol=0.1)
self.assertTrue(sm.fit(s_ref, s_test))
def test_empty(self):
# single line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n ''")
self.assertEqual(cb.data['_tag'][0], '')
# multi line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n;\n;")
self.assertEqual(cb.data['_tag'][0], '')
cb2 = CifBlock.from_string(str(cb))
self.assertEqual(cb, cb2)
def test_bad_cif(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = os.path.join(test_dir, "bad_occu.cif")
p = CifParser(f)
self.assertRaises(ValueError, p.get_structures)
p = CifParser(f, occupancy_tolerance=2)
s = p.get_structures()[0]
self.assertAlmostEqual(s[0].species_and_occu["Al3+"], 0.5)
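# (occupancy_tolerance=2 accepts sites whose occupancies sum to
# anything in (1, 2] and rescales them to 1, which is why the parse
# succeeds here)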
def test_one_line_symm(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = os.path.join(test_dir, "OneLineSymmP1.cif")
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "Ga4 Pb2 O8")
def test_no_symmops(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = os.path.join(test_dir, "nosymm.cif")
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "H96 C60 O8")
def test_dot_positions(self):
f = os.path.join(test_dir, "ICSD59959.cif")
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "K1 Mn1 F3")
class MagCifTest(unittest.TestCase):
def setUp(self):
self.mcif = CifParser(os.path.join(test_dir,
"magnetic.example.NiO.mcif"))
self.mcif_ncl = CifParser(os.path.join(test_dir,
"magnetic.ncl.example.GdB4.mcif"))
self.mcif_incom = CifParser(os.path.join(test_dir,
"magnetic.incommensurate.example.Cr.mcif"))
self.mcif_disord = CifParser(os.path.join(test_dir,
"magnetic.disordered.example.CuMnO2.mcif"))
self.mcif_ncl2 = CifParser(os.path.join(test_dir,
"Mn3Ge_IR2.mcif"))
def test_mcif_detection(self):
self.assertTrue(self.mcif.feature_flags["magcif"])
self.assertTrue(self.mcif_ncl.feature_flags["magcif"])
self.assertTrue(self.mcif_incom.feature_flags["magcif"])
self.assertTrue(self.mcif_disord.feature_flags["magcif"])
self.assertFalse(self.mcif.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_ncl.feature_flags["magcif_incommensurate"])
self.assertTrue(self.mcif_incom.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_disord.feature_flags["magcif_incommensurate"])
def test_get_structures(self):
# incommensurate structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_incom.get_structures)
# disordered magnetic structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_disord.get_structures)
# taken from self.mcif_ncl, removing explicit magnetic symmops
# so that MagneticSymmetryGroup() has to be invoked
magcifstr = """
data_5yOhtAoR
_space_group.magn_name_BNS "P 4/m' b' m' "
_cell_length_a 7.1316
_cell_length_b 7.1316
_cell_length_c 4.0505
_cell_angle_alpha 90.00
_cell_angle_beta 90.00
_cell_angle_gamma 90.00
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd1 Gd 0.31746 0.81746 0.00000 1
B1 B 0.00000 0.00000 0.20290 1
B2 B 0.17590 0.03800 0.50000 1
B3 B 0.08670 0.58670 0.50000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05 5.05 0.0"""
s = self.mcif.get_structures(primitive=False)[0]
self.assertEqual(s.formula, "Ni32 O32")
self.assertTrue(Magmom.are_collinear(s.site_properties['magmom']))
# example with non-collinear spin
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
s_ncl_from_msg = CifParser.from_string(magcifstr).get_structures(primitive=False)[0]
self.assertEqual(s_ncl.formula, "Gd4 B16")
self.assertFalse(Magmom.are_collinear(s_ncl.site_properties['magmom']))
self.assertTrue(s_ncl.matches(s_ncl_from_msg))
def test_write(self):
cw_ref_string = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd1 1 0.317460 0.817460 0.000000 1.0
Gd Gd2 1 0.182540 0.317460 0.000000 1.0
Gd Gd3 1 0.817460 0.682540 0.000000 1.0
Gd Gd4 1 0.682540 0.182540 0.000000 1.0
B B5 1 0.000000 0.000000 0.202900 1.0
B B6 1 0.500000 0.500000 0.797100 1.0
B B7 1 0.000000 0.000000 0.797100 1.0
B B8 1 0.500000 0.500000 0.202900 1.0
B B9 1 0.175900 0.038000 0.500000 1.0
B B10 1 0.962000 0.175900 0.500000 1.0
B B11 1 0.038000 0.824100 0.500000 1.0
B B12 1 0.675900 0.462000 0.500000 1.0
B B13 1 0.324100 0.538000 0.500000 1.0
B B14 1 0.824100 0.962000 0.500000 1.0
B B15 1 0.538000 0.675900 0.500000 1.0
B B16 1 0.462000 0.324100 0.500000 1.0
B B17 1 0.086700 0.586700 0.500000 1.0
B B18 1 0.413300 0.086700 0.500000 1.0
B B19 1 0.586700 0.913300 0.500000 1.0
B B20 1 0.913300 0.413300 0.500000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05000 5.05000 0.00000
Gd2 -5.05000 5.05000 0.00000
Gd3 5.05000 -5.05000 0.00000
Gd4 -5.05000 -5.05000 0.00000
"""
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(str(cw), cw_ref_string)
# from list-type magmoms
list_magmoms = [list(m) for m in s_ncl.site_properties['magmom']]
# float magmoms (magnitude only)
float_magmoms = [float(m) for m in s_ncl.site_properties['magmom']]
s_ncl.add_site_property('magmom', list_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(str(cw), cw_ref_string)
s_ncl.add_site_property('magmom', float_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
cw_ref_string_magnitudes = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd1 1 0.317460 0.817460 0.000000 1.0
Gd Gd2 1 0.182540 0.317460 0.000000 1.0
Gd Gd3 1 0.817460 0.682540 0.000000 1.0
Gd Gd4 1 0.682540 0.182540 0.000000 1.0
B B5 1 0.000000 0.000000 0.202900 1.0
B B6 1 0.500000 0.500000 0.797100 1.0
B B7 1 0.000000 0.000000 0.797100 1.0
B B8 1 0.500000 0.500000 0.202900 1.0
B B9 1 0.175900 0.038000 0.500000 1.0
B B10 1 0.962000 0.175900 0.500000 1.0
B B11 1 0.038000 0.824100 0.500000 1.0
B B12 1 0.675900 0.462000 0.500000 1.0
B B13 1 0.324100 0.538000 0.500000 1.0
B B14 1 0.824100 0.962000 0.500000 1.0
B B15 1 0.538000 0.675900 0.500000 1.0
B B16 1 0.462000 0.324100 0.500000 1.0
B B17 1 0.086700 0.586700 0.500000 1.0
B B18 1 0.413300 0.086700 0.500000 1.0
B B19 1 0.586700 0.913300 0.500000 1.0
B B20 1 0.913300 0.413300 0.500000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 0.00000 0.00000 7.14178
Gd2 0.00000 0.00000 7.14178
Gd3 0.00000 0.00000 -7.14178
Gd4 0.00000 0.00000 -7.14178
"""
self.assertEqual(str(cw), cw_ref_string_magnitudes)
# test that we get the correct magmoms in the non-collinear (ncl) case
s_ncl2 = self.mcif_ncl2.get_structures()[0]
list_magmoms = [list(m) for m in s_ncl2.site_properties['magmom']]
self.assertEqual(list_magmoms[0][0], 0.0)
self.assertAlmostEqual(list_magmoms[0][1], 5.9160793408726366)
self.assertAlmostEqual(list_magmoms[1][0], -5.1234749999999991)
self.assertAlmostEqual(list_magmoms[1][1], 2.9580396704363183)
@unittest.skipIf(pybtex is None, "pybtex not present")
def test_bibtex(self):
ref_bibtex_string = """@article{cif-reference-0,
author = "Blanco, J.A.",
journal = "PHYSICAL REVIEW B",
volume = "73",
year = "2006",
pages = "?--?"
}
"""
self.assertEqual(self.mcif_ncl.get_bibtex_string(), ref_bibtex_string)
if __name__ == '__main__':
unittest.main()
|
nisse3000/pymatgen
|
pymatgen/io/tests/test_cif.py
|
Python
|
mit
| 43,915
|
[
"VASP",
"pymatgen"
] |
80ff38bec050403367af0d5b2ec010c00491abad627261c8ef2742496c83df7e
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries and reference energies from Rezac et al. JCTC 7 2427 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'HB'`` hydrogen-bonded systems
- ``'MX'`` mixed-influence systems
- ``'DD'`` dispersion-dominated systems
"""
import re
import qcdb
# <<< S66 Database Module >>>
dbse = 'S66'
# <<< Database Members >>>
HRXN = range(1, 67)
HRXN_SM = [1, 12, 59]
HRXN_LG = [26, 34]
HB = range(1, 24)
MX = range(47, 67)
DD = range(24, 47)
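# Quick sanity sketch (illustrative; not part of the original module): the
# three chemical subsets partition the 66 reactions without overlap.
assert sorted(list(HB) + list(MX) + list(DD)) == list(range(1, 67))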
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
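# Minimal sketch (hypothetical helper, not part of the original database
# module) showing how the bookkeeping above is meant to be combined:
# summing reagent energies weighted by their RXNM coefficients yields the
# counterpoise-corrected or uncorrected interaction energy.
def _interaction_energy_sketch(rxn, energies, cp=False):
    """`energies` maps reagent labels to computed total energies."""
    reagents = ACTV_CP[rxn] if cp else ACTV[rxn]
    return sum(RXNM[rxn][rgt] * energies[rgt] for rgt in reagents)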
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, '1' )] = -4.918
BIND['%s-%s' % (dbse, '2' )] = -5.592
BIND['%s-%s' % (dbse, '3' )] = -6.908
BIND['%s-%s' % (dbse, '4' )] = -8.103
BIND['%s-%s' % (dbse, '5' )] = -5.757
BIND['%s-%s' % (dbse, '6' )] = -7.554
BIND['%s-%s' % (dbse, '7' )] = -8.230
BIND['%s-%s' % (dbse, '8' )] = -5.009
BIND['%s-%s' % (dbse, '9' )] = -3.059
BIND['%s-%s' % (dbse, '10' )] = -4.160
BIND['%s-%s' % (dbse, '11' )] = -5.419
BIND['%s-%s' % (dbse, '12' )] = -7.266
BIND['%s-%s' % (dbse, '13' )] = -6.187
BIND['%s-%s' % (dbse, '14' )] = -7.454
BIND['%s-%s' % (dbse, '15' )] = -8.630
BIND['%s-%s' % (dbse, '16' )] = -5.124
BIND['%s-%s' % (dbse, '17' )] = -17.182
BIND['%s-%s' % (dbse, '18' )] = -6.857
BIND['%s-%s' % (dbse, '19' )] = -7.410
BIND['%s-%s' % (dbse, '20' )] = -19.093
BIND['%s-%s' % (dbse, '21' )] = -16.265
BIND['%s-%s' % (dbse, '22' )] = -19.491
BIND['%s-%s' % (dbse, '23' )] = -19.189
BIND['%s-%s' % (dbse, '24' )] = -2.822
BIND['%s-%s' % (dbse, '25' )] = -3.895
BIND['%s-%s' % (dbse, '26' )] = -9.829
BIND['%s-%s' % (dbse, '27' )] = -3.439
BIND['%s-%s' % (dbse, '28' )] = -5.713
BIND['%s-%s' % (dbse, '29' )] = -6.819
BIND['%s-%s' % (dbse, '30' )] = -1.432
BIND['%s-%s' % (dbse, '31' )] = -3.380
BIND['%s-%s' % (dbse, '32' )] = -3.738
BIND['%s-%s' % (dbse, '33' )] = -1.872
BIND['%s-%s' % (dbse, '34' )] = -3.776
BIND['%s-%s' % (dbse, '35' )] = -2.613
BIND['%s-%s' % (dbse, '36' )] = -1.777
BIND['%s-%s' % (dbse, '37' )] = -2.404
BIND['%s-%s' % (dbse, '38' )] = -2.997
BIND['%s-%s' % (dbse, '39' )] = -3.575
BIND['%s-%s' % (dbse, '40' )] = -2.895
BIND['%s-%s' % (dbse, '41' )] = -4.848
BIND['%s-%s' % (dbse, '42' )] = -4.138
BIND['%s-%s' % (dbse, '43' )] = -3.712
BIND['%s-%s' % (dbse, '44' )] = -2.005
BIND['%s-%s' % (dbse, '45' )] = -1.748
BIND['%s-%s' % (dbse, '46' )] = -4.264
BIND['%s-%s' % (dbse, '47' )] = -2.876
BIND['%s-%s' % (dbse, '48' )] = -3.535
BIND['%s-%s' % (dbse, '49' )] = -3.331
BIND['%s-%s' % (dbse, '50' )] = -2.867
BIND['%s-%s' % (dbse, '51' )] = -1.524
BIND['%s-%s' % (dbse, '52' )] = -4.707
BIND['%s-%s' % (dbse, '53' )] = -4.361
BIND['%s-%s' % (dbse, '54' )] = -3.277
BIND['%s-%s' % (dbse, '55' )] = -4.188
BIND['%s-%s' % (dbse, '56' )] = -3.231
BIND['%s-%s' % (dbse, '57' )] = -5.282
BIND['%s-%s' % (dbse, '58' )] = -4.146
BIND['%s-%s' % (dbse, '59' )] = -2.850
BIND['%s-%s' % (dbse, '60' )] = -4.868
BIND['%s-%s' % (dbse, '61' )] = -2.912
BIND['%s-%s' % (dbse, '62' )] = -3.534
BIND['%s-%s' % (dbse, '63' )] = -3.801
BIND['%s-%s' % (dbse, '64' )] = -2.999
BIND['%s-%s' % (dbse, '65' )] = -3.991
BIND['%s-%s' % (dbse, '66' )] = -3.968
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, '1' )] = """Water Dimer """
TAGL['%s-%s-dimer' % (dbse, '1' )] = """Dimer from Water Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '1' )] = """Monomer A from Water Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '1' )] = """Monomer B from Water Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '1' )] = """Monomer A from Water Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '1' )] = """Monomer B from Water Dimer """
TAGL['%s-%s' % (dbse, '2' )] = """Water-Methanol """
TAGL['%s-%s-dimer' % (dbse, '2' )] = """Dimer from Water-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '2' )] = """Monomer A from Water-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '2' )] = """Monomer B from Water-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '2' )] = """Monomer A from Water-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '2' )] = """Monomer B from Water-Methanol """
TAGL['%s-%s' % (dbse, '3' )] = """Water-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '3' )] = """Dimer from Water-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '3' )] = """Monomer A from Water-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '3' )] = """Monomer B from Water-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '3' )] = """Monomer A from Water-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '3' )] = """Monomer B from Water-Methylamine """
TAGL['%s-%s' % (dbse, '4' )] = """Water-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '4' )] = """Dimer from Water-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '4' )] = """Monomer A from Water-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '4' )] = """Monomer B from Water-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '4' )] = """Monomer A from Water-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '4' )] = """Monomer B from Water-N-methylacetamide """
TAGL['%s-%s' % (dbse, '5' )] = """Methanol Dimer """
TAGL['%s-%s-dimer' % (dbse, '5' )] = """Dimer from Methanol Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '5' )] = """Monomer A from Methanol Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '5' )] = """Monomer B from Methanol Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '5' )] = """Monomer A from Methanol Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '5' )] = """Monomer B from Methanol Dimer """
TAGL['%s-%s' % (dbse, '6' )] = """Methanol-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '6' )] = """Dimer from Methanol-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '6' )] = """Monomer A from Methanol-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '6' )] = """Monomer B from Methanol-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '6' )] = """Monomer A from Methanol-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '6' )] = """Monomer B from Methanol-Methylamine """
TAGL['%s-%s' % (dbse, '7' )] = """Methanol-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '7' )] = """Dimer from Methanol-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '7' )] = """Monomer A from Methanol-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '7' )] = """Monomer B from Methanol-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '7' )] = """Monomer A from Methanol-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '7' )] = """Monomer B from Methanol-N-methylacetamide """
TAGL['%s-%s' % (dbse, '8' )] = """Methanol-Water """
TAGL['%s-%s-dimer' % (dbse, '8' )] = """Dimer from Methanol-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '8' )] = """Monomer A from Methanol-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '8' )] = """Monomer B from Methanol-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '8' )] = """Monomer A from Methanol-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '8' )] = """Monomer B from Methanol-Water """
TAGL['%s-%s' % (dbse, '9' )] = """Methylamine-Methanol """
TAGL['%s-%s-dimer' % (dbse, '9' )] = """Dimer from Methylamine-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '9' )] = """Monomer A from Methylamine-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '9' )] = """Monomer B from Methylamine-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '9' )] = """Monomer A from Methylamine-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '9' )] = """Monomer B from Methylamine-Methanol """
TAGL['%s-%s' % (dbse, '10' )] = """Methylamine Dimer """
TAGL['%s-%s-dimer' % (dbse, '10' )] = """Dimer from Methylamine Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '10' )] = """Monomer A from Methylamine Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '10' )] = """Monomer B from Methylamine Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '10' )] = """Monomer A from Methylamine Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '10' )] = """Monomer B from Methylamine Dimer """
TAGL['%s-%s' % (dbse, '11' )] = """Methylamine-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '11' )] = """Dimer from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '11' )] = """Monomer A from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '11' )] = """Monomer B from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '11' )] = """Monomer A from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '11' )] = """Monomer B from Methylamine-N-methylacetamide """
TAGL['%s-%s' % (dbse, '12' )] = """Methylamine-Water """
TAGL['%s-%s-dimer' % (dbse, '12' )] = """Dimer from Methylamine-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '12' )] = """Monomer A from Methylamine-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '12' )] = """Monomer B from Methylamine-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '12' )] = """Monomer A from Methylamine-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '12' )] = """Monomer B from Methylamine-Water """
TAGL['%s-%s' % (dbse, '13' )] = """N-methylacetamide-Methanol """
TAGL['%s-%s-dimer' % (dbse, '13' )] = """Dimer from N-methylacetamide-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '13' )] = """Monomer A from N-methylacetamide-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '13' )] = """Monomer B from N-methylacetamide-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '13' )] = """Monomer A from N-methylacetamide-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '13' )] = """Monomer B from N-methylacetamide-Methanol """
TAGL['%s-%s' % (dbse, '14' )] = """N-methylacetamide-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '14' )] = """Dimer from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '14' )] = """Monomer A from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '14' )] = """Monomer B from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '14' )] = """Monomer A from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '14' )] = """Monomer B from N-methylacetamide-Methylamine """
TAGL['%s-%s' % (dbse, '15' )] = """N-methylacetamide Dimer """
TAGL['%s-%s-dimer' % (dbse, '15' )] = """Dimer from N-methylacetamide Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '15' )] = """Monomer A from N-methylacetamide Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '15' )] = """Monomer B from N-methylacetamide Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '15' )] = """Monomer A from N-methylacetamide Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '15' )] = """Monomer B from N-methylacetamide Dimer """
TAGL['%s-%s' % (dbse, '16' )] = """N-methylacetamide-Water """
TAGL['%s-%s-dimer' % (dbse, '16' )] = """Dimer from N-methylacetamide-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '16' )] = """Monomer A from N-methylacetamide-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '16' )] = """Monomer B from N-methylacetamide-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '16' )] = """Monomer A from N-methylacetamide-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '16' )] = """Monomer B from N-methylacetamide-Water """
TAGL['%s-%s' % (dbse, '17' )] = """Uracil Dimer, HB """
TAGL['%s-%s-dimer' % (dbse, '17' )] = """Dimer from Uracil Dimer, HB """
TAGL['%s-%s-monoA-CP' % (dbse, '17' )] = """Monomer A from Uracil Dimer, HB """
TAGL['%s-%s-monoB-CP' % (dbse, '17' )] = """Monomer B from Uracil Dimer, HB """
TAGL['%s-%s-monoA-unCP' % (dbse, '17' )] = """Monomer A from Uracil Dimer, HB """
TAGL['%s-%s-monoB-unCP' % (dbse, '17' )] = """Monomer B from Uracil Dimer, HB """
TAGL['%s-%s' % (dbse, '18' )] = """Water-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '18' )] = """Dimer from Water-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '18' )] = """Monomer A from Water-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '18' )] = """Monomer B from Water-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '18' )] = """Monomer A from Water-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '18' )] = """Monomer B from Water-Pyridine """
TAGL['%s-%s' % (dbse, '19' )] = """Methanol-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '19' )] = """Dimer from Methanol-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '19' )] = """Monomer A from Methanol-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '19' )] = """Monomer B from Methanol-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '19' )] = """Monomer A from Methanol-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '19' )] = """Monomer B from Methanol-Pyridine """
TAGL['%s-%s' % (dbse, '20' )] = """Acetic Acid Dimer """
TAGL['%s-%s-dimer' % (dbse, '20' )] = """Dimer from Acetic Acid Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '20' )] = """Monomer A from Acetic Acid Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '20' )] = """Monomer B from Acetic Acid Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '20' )] = """Monomer A from Acetic Acid Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '20' )] = """Monomer B from Acetic Acid Dimer """
TAGL['%s-%s' % (dbse, '21' )] = """Acetamide Dimer """
TAGL['%s-%s-dimer' % (dbse, '21' )] = """Dimer from Acetamide Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '21' )] = """Monomer A from Acetamide Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '21' )] = """Monomer B from Acetamide Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '21' )] = """Monomer A from Acetamide Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '21' )] = """Monomer B from Acetamide Dimer """
TAGL['%s-%s' % (dbse, '22' )] = """Acetic Acid-Uracil """
TAGL['%s-%s-dimer' % (dbse, '22' )] = """Dimer from Acetic Acid-Uracil """
TAGL['%s-%s-monoA-CP' % (dbse, '22' )] = """Monomer A from Acetic Acid-Uracil """
TAGL['%s-%s-monoB-CP' % (dbse, '22' )] = """Monomer B from Acetic Acid-Uracil """
TAGL['%s-%s-monoA-unCP' % (dbse, '22' )] = """Monomer A from Acetic Acid-Uracil """
TAGL['%s-%s-monoB-unCP' % (dbse, '22' )] = """Monomer B from Acetic Acid-Uracil """
TAGL['%s-%s' % (dbse, '23' )] = """Acetamide-Uracil """
TAGL['%s-%s-dimer' % (dbse, '23' )] = """Dimer from Acetamide-Uracil """
TAGL['%s-%s-monoA-CP' % (dbse, '23' )] = """Monomer A from Acetamide-Uracil """
TAGL['%s-%s-monoB-CP' % (dbse, '23' )] = """Monomer B from Acetamide-Uracil """
TAGL['%s-%s-monoA-unCP' % (dbse, '23' )] = """Monomer A from Acetamide-Uracil """
TAGL['%s-%s-monoB-unCP' % (dbse, '23' )] = """Monomer B from Acetamide-Uracil """
TAGL['%s-%s' % (dbse, '24' )] = """Benzene Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '24' )] = """Dimer from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '24' )] = """Monomer A from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '24' )] = """Monomer B from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '24' )] = """Monomer A from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '24' )] = """Monomer B from Benzene Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '25' )] = """Pyridine Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '25' )] = """Dimer from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '25' )] = """Monomer A from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '25' )] = """Monomer B from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '25' )] = """Monomer A from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '25' )] = """Monomer B from Pyridine Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '26' )] = """Uracil Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '26' )] = """Dimer from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '26' )] = """Monomer A from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '26' )] = """Monomer B from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '26' )] = """Monomer A from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '26' )] = """Monomer B from Uracil Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '27' )] = """Benzene-Pyridine, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '27' )] = """Dimer from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '27' )] = """Monomer A from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '27' )] = """Monomer B from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '27' )] = """Monomer A from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '27' )] = """Monomer B from Benzene-Pyridine, pi-pi """
TAGL['%s-%s' % (dbse, '28' )] = """Benzene-Uracil, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '28' )] = """Dimer from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '28' )] = """Monomer A from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '28' )] = """Monomer B from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '28' )] = """Monomer A from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '28' )] = """Monomer B from Benzene-Uracil, pi-pi """
TAGL['%s-%s' % (dbse, '29' )] = """Pyridine-Uracil, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '29' )] = """Dimer from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '29' )] = """Monomer A from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '29' )] = """Monomer B from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '29' )] = """Monomer A from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '29' )] = """Monomer B from Pyridine-Uracil, pi-pi """
TAGL['%s-%s' % (dbse, '30' )] = """Benzene-Ethene """
TAGL['%s-%s-dimer' % (dbse, '30' )] = """Dimer from Benzene-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '30' )] = """Monomer A from Benzene-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '30' )] = """Monomer B from Benzene-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '30' )] = """Monomer A from Benzene-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '30' )] = """Monomer B from Benzene-Ethene """
TAGL['%s-%s' % (dbse, '31' )] = """Uracil-Ethene """
TAGL['%s-%s-dimer' % (dbse, '31' )] = """Dimer from Uracil-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '31' )] = """Monomer A from Uracil-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '31' )] = """Monomer B from Uracil-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '31' )] = """Monomer A from Uracil-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '31' )] = """Monomer B from Uracil-Ethene """
TAGL['%s-%s' % (dbse, '32' )] = """Uracil-Ethyne """
TAGL['%s-%s-dimer' % (dbse, '32' )] = """Dimer from Uracil-Ethyne """
TAGL['%s-%s-monoA-CP' % (dbse, '32' )] = """Monomer A from Uracil-Ethyne """
TAGL['%s-%s-monoB-CP' % (dbse, '32' )] = """Monomer B from Uracil-Ethyne """
TAGL['%s-%s-monoA-unCP' % (dbse, '32' )] = """Monomer A from Uracil-Ethyne """
TAGL['%s-%s-monoB-unCP' % (dbse, '32' )] = """Monomer B from Uracil-Ethyne """
TAGL['%s-%s' % (dbse, '33' )] = """Pyridine-Ethene """
TAGL['%s-%s-dimer' % (dbse, '33' )] = """Dimer from Pyridine-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '33' )] = """Monomer A from Pyridine-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '33' )] = """Monomer B from Pyridine-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '33' )] = """Monomer A from Pyridine-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '33' )] = """Monomer B from Pyridine-Ethene """
TAGL['%s-%s' % (dbse, '34' )] = """Pentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '34' )] = """Dimer from Pentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '34' )] = """Monomer A from Pentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '34' )] = """Monomer B from Pentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '34' )] = """Monomer A from Pentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '34' )] = """Monomer B from Pentane Dimer """
TAGL['%s-%s' % (dbse, '35' )] = """Neopentane-Pentane """
TAGL['%s-%s-dimer' % (dbse, '35' )] = """Dimer from Neopentane-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '35' )] = """Monomer A from Neopentane-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '35' )] = """Monomer B from Neopentane-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '35' )] = """Monomer A from Neopentane-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '35' )] = """Monomer B from Neopentane-Pentane """
TAGL['%s-%s' % (dbse, '36' )] = """Neopentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '36' )] = """Dimer from Neopentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '36' )] = """Monomer A from Neopentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '36' )] = """Monomer B from Neopentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '36' )] = """Monomer A from Neopentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '36' )] = """Monomer B from Neopentane Dimer """
TAGL['%s-%s' % (dbse, '37' )] = """Cyclopentane-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '37' )] = """Dimer from Cyclopentane-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '37' )] = """Monomer A from Cyclopentane-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '37' )] = """Monomer B from Cyclopentane-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '37' )] = """Monomer A from Cyclopentane-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '37' )] = """Monomer B from Cyclopentane-Neopentane """
TAGL['%s-%s' % (dbse, '38' )] = """Cyclopentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '38' )] = """Dimer from Cyclopentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '38' )] = """Monomer A from Cyclopentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '38' )] = """Monomer B from Cyclopentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '38' )] = """Monomer A from Cyclopentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '38' )] = """Monomer B from Cyclopentane Dimer """
TAGL['%s-%s' % (dbse, '39' )] = """Benzene-Cyclopentane """
TAGL['%s-%s-dimer' % (dbse, '39' )] = """Dimer from Benzene-Cyclopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '39' )] = """Monomer A from Benzene-Cyclopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '39' )] = """Monomer B from Benzene-Cyclopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '39' )] = """Monomer A from Benzene-Cyclopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '39' )] = """Monomer B from Benzene-Cyclopentane """
TAGL['%s-%s' % (dbse, '40' )] = """Benzene-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '40' )] = """Dimer from Benzene-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '40' )] = """Monomer A from Benzene-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '40' )] = """Monomer B from Benzene-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '40' )] = """Monomer A from Benzene-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '40' )] = """Monomer B from Benzene-Neopentane """
TAGL['%s-%s' % (dbse, '41' )] = """Uracil-Pentane """
TAGL['%s-%s-dimer' % (dbse, '41' )] = """Dimer from Uracil-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '41' )] = """Monomer A from Uracil-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '41' )] = """Monomer B from Uracil-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '41' )] = """Monomer A from Uracil-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '41' )] = """Monomer B from Uracil-Pentane """
TAGL['%s-%s' % (dbse, '42' )] = """Uracil-Cyclopentane """
TAGL['%s-%s-dimer' % (dbse, '42' )] = """Dimer from Uracil-Cyclopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '42' )] = """Monomer A from Uracil-Cyclopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '42' )] = """Monomer B from Uracil-Cyclopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '42' )] = """Monomer A from Uracil-Cyclopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '42' )] = """Monomer B from Uracil-Cyclopentane """
TAGL['%s-%s' % (dbse, '43' )] = """Uracil-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '43' )] = """Dimer from Uracil-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '43' )] = """Monomer A from Uracil-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '43' )] = """Monomer B from Uracil-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '43' )] = """Monomer A from Uracil-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '43' )] = """Monomer B from Uracil-Neopentane """
TAGL['%s-%s' % (dbse, '44' )] = """Ethene-Pentane """
TAGL['%s-%s-dimer' % (dbse, '44' )] = """Dimer from Ethene-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '44' )] = """Monomer A from Ethene-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '44' )] = """Monomer B from Ethene-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '44' )] = """Monomer A from Ethene-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '44' )] = """Monomer B from Ethene-Pentane """
TAGL['%s-%s' % (dbse, '45' )] = """Ethyne-Pentane """
TAGL['%s-%s-dimer' % (dbse, '45' )] = """Dimer from Ethyne-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '45' )] = """Monomer A from Ethyne-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '45' )] = """Monomer B from Ethyne-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '45' )] = """Monomer A from Ethyne-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '45' )] = """Monomer B from Ethyne-Pentane """
TAGL['%s-%s' % (dbse, '46' )] = """N-methylacetamide-Pentane """
TAGL['%s-%s-dimer' % (dbse, '46' )] = """Dimer from N-methylacetamide-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '46' )] = """Monomer A from N-methylacetamide-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '46' )] = """Monomer B from N-methylacetamide-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '46' )] = """Monomer A from N-methylacetamide-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '46' )] = """Monomer B from N-methylacetamide-Pentane """
TAGL['%s-%s' % (dbse, '47' )] = """Benzene Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '47' )] = """Dimer from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '47' )] = """Monomer A from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '47' )] = """Monomer B from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '47' )] = """Monomer A from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '47' )] = """Monomer B from Benzene Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '48' )] = """Pyridine Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '48' )] = """Dimer from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '48' )] = """Monomer A from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '48' )] = """Monomer B from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '48' )] = """Monomer A from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '48' )] = """Monomer B from Pyridine Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '49' )] = """Benzene-Pyridine, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '49' )] = """Dimer from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '49' )] = """Monomer A from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '49' )] = """Monomer B from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '49' )] = """Monomer A from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '49' )] = """Monomer B from Benzene-Pyridine, CH-pi """
TAGL['%s-%s' % (dbse, '50' )] = """Benzene-Ethyne, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '50' )] = """Dimer from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '50' )] = """Monomer A from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '50' )] = """Monomer B from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '50' )] = """Monomer A from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '50' )] = """Monomer B from Benzene-Ethyne, CH-pi """
TAGL['%s-%s' % (dbse, '51' )] = """Ethyne Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '51' )] = """Dimer from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '51' )] = """Monomer A from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '51' )] = """Monomer B from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '51' )] = """Monomer A from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '51' )] = """Monomer B from Ethyne Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '52' )] = """Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '52' )] = """Dimer from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '52' )] = """Monomer A from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '52' )] = """Monomer B from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '52' )] = """Monomer A from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '52' )] = """Monomer B from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s' % (dbse, '53' )] = """Benzene-Acetamide, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '53' )] = """Dimer from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '53' )] = """Monomer A from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '53' )] = """Monomer B from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '53' )] = """Monomer A from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '53' )] = """Monomer B from Benzene-Acetamide, NH-pi """
TAGL['%s-%s' % (dbse, '54' )] = """Benzene-Water, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '54' )] = """Dimer from Benzene-Water, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '54' )] = """Monomer A from Benzene-Water, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '54' )] = """Monomer B from Benzene-Water, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '54' )] = """Monomer A from Benzene-Water, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '54' )] = """Monomer B from Benzene-Water, OH-pi """
TAGL['%s-%s' % (dbse, '55' )] = """Benzene-Methanol, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '55' )] = """Dimer from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '55' )] = """Monomer A from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '55' )] = """Monomer B from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '55' )] = """Monomer A from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '55' )] = """Monomer B from Benzene-Methanol, OH-pi """
TAGL['%s-%s' % (dbse, '56' )] = """Benzene-Methylamine, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '56' )] = """Dimer from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '56' )] = """Monomer A from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '56' )] = """Monomer B from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '56' )] = """Monomer A from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '56' )] = """Monomer B from Benzene-Methylamine, NH-pi """
TAGL['%s-%s' % (dbse, '57' )] = """Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '57' )] = """Dimer from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '57' )] = """Monomer A from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '57' )] = """Monomer B from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '57' )] = """Monomer A from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '57' )] = """Monomer B from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s' % (dbse, '58' )] = """Pyridine Dimer, CH-N """
TAGL['%s-%s-dimer' % (dbse, '58' )] = """Dimer from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoA-CP' % (dbse, '58' )] = """Monomer A from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoB-CP' % (dbse, '58' )] = """Monomer B from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoA-unCP' % (dbse, '58' )] = """Monomer A from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoB-unCP' % (dbse, '58' )] = """Monomer B from Pyridine Dimer, CH-N """
TAGL['%s-%s' % (dbse, '59' )] = """Ethyne-Water, CH-O """
TAGL['%s-%s-dimer' % (dbse, '59' )] = """Dimer from Ethyne-Water, CH-O """
TAGL['%s-%s-monoA-CP' % (dbse, '59' )] = """Monomer A from Ethyne-Water, CH-O """
TAGL['%s-%s-monoB-CP' % (dbse, '59' )] = """Monomer B from Ethyne-Water, CH-O """
TAGL['%s-%s-monoA-unCP' % (dbse, '59' )] = """Monomer A from Ethyne-Water, CH-O """
TAGL['%s-%s-monoB-unCP' % (dbse, '59' )] = """Monomer B from Ethyne-Water, CH-O """
TAGL['%s-%s' % (dbse, '60' )] = """Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '60' )] = """Dimer from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '60' )] = """Monomer A from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '60' )] = """Monomer B from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '60' )] = """Monomer A from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '60' )] = """Monomer B from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s' % (dbse, '61' )] = """Pentane-Acetic Acid """
TAGL['%s-%s-dimer' % (dbse, '61' )] = """Dimer from Pentane-Acetic Acid """
TAGL['%s-%s-monoA-CP' % (dbse, '61' )] = """Monomer A from Pentane-Acetic Acid """
TAGL['%s-%s-monoB-CP' % (dbse, '61' )] = """Monomer B from Pentane-Acetic Acid """
TAGL['%s-%s-monoA-unCP' % (dbse, '61' )] = """Monomer A from Pentane-Acetic Acid """
TAGL['%s-%s-monoB-unCP' % (dbse, '61' )] = """Monomer B from Pentane-Acetic Acid """
TAGL['%s-%s' % (dbse, '62' )] = """Pentane-Acetamide """
TAGL['%s-%s-dimer' % (dbse, '62' )] = """Dimer from Pentane-Acetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '62' )] = """Monomer A from Pentane-Acetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '62' )] = """Monomer B from Pentane-Acetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '62' )] = """Monomer A from Pentane-Acetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '62' )] = """Monomer B from Pentane-Acetamide """
TAGL['%s-%s' % (dbse, '63' )] = """Benzene-Acetic Acid """
TAGL['%s-%s-dimer' % (dbse, '63' )] = """Dimer from Benzene-Acetic Acid """
TAGL['%s-%s-monoA-CP' % (dbse, '63' )] = """Monomer A from Benzene-Acetic Acid """
TAGL['%s-%s-monoB-CP' % (dbse, '63' )] = """Monomer B from Benzene-Acetic Acid """
TAGL['%s-%s-monoA-unCP' % (dbse, '63' )] = """Monomer A from Benzene-Acetic Acid """
TAGL['%s-%s-monoB-unCP' % (dbse, '63' )] = """Monomer B from Benzene-Acetic Acid """
TAGL['%s-%s' % (dbse, '64' )] = """N-methylacetamide-Ethene """
TAGL['%s-%s-dimer' % (dbse, '64' )] = """Dimer from N-methylacetamide-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '64' )] = """Monomer A from N-methylacetamide-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '64' )] = """Monomer B from N-methylacetamide-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '64' )] = """Monomer A from N-methylacetamide-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '64' )] = """Monomer B from N-methylacetamide-Ethene """
TAGL['%s-%s' % (dbse, '65' )] = """Pyridine-Ethyne """
TAGL['%s-%s-dimer' % (dbse, '65' )] = """Dimer from Pyridine-Ethyne """
TAGL['%s-%s-monoA-CP' % (dbse, '65' )] = """Monomer A from Pyridine-Ethyne """
TAGL['%s-%s-monoB-CP' % (dbse, '65' )] = """Monomer B from Pyridine-Ethyne """
TAGL['%s-%s-monoA-unCP' % (dbse, '65' )] = """Monomer A from Pyridine-Ethyne """
TAGL['%s-%s-monoB-unCP' % (dbse, '65' )] = """Monomer B from Pyridine-Ethyne """
TAGL['%s-%s' % (dbse, '66' )] = """Methylamine-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '66' )] = """Dimer from Methylamine-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '66' )] = """Monomer A from Methylamine-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '66' )] = """Monomer B from Methylamine-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '66' )] = """Monomer A from Methylamine-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '66' )] = """Monomer B from Methylamine-Pyridine """
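# NOTE (added comment, describing the pattern above): each reaction index
# carries five TAGL entries -- the dimer plus counterpoise-corrected (CP)
# and uncorrected (unCP) labels for monomers A and B -- all keyed as
# '%s-%s' % (dbse, rxn) with an optional '-dimer'/'-monoA-CP'/... suffix.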
# <<< Geometry Specification Strings >>>
GEOS = {}
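# A minimal, commented sketch (not part of the original data; 'rxn' is a
# hypothetical index) of how these GEOS entries are typically consumed.
# Only the dimer geometry is stored explicitly below; in other Psi4 qcdb
# database files the CP/unCP monomer geometries are derived from it with
# qcdb.Molecule.extract_fragments(), and the same convention is assumed here:
#
#     rxn = '17'
#     dimer      = GEOS['%s-%s-dimer' % (dbse, rxn)]
#     monoA_unCP = dimer.extract_fragments(1)     # fragment 1 alone
#     monoA_CP   = dimer.extract_fragments(1, 2)  # fragment 1 + ghosted fragment 2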
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O -0.70219605 -0.05606026 0.00994226
H -1.02219322 0.84677578 -0.01148871
H 0.25752106 0.04212150 0.00521900
--
0 1
O 2.22087107 0.02671679 0.00062048
H 2.59749268 -0.41166327 0.76674486
H 2.59313538 -0.44949618 -0.74478203
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.52532979 -0.05097108 -0.31451686
H -0.94200663 0.74790163 0.01125282
H 0.40369652 0.05978598 -0.07356837
--
0 1
O 2.31663329 0.04550085 0.07185839
H 2.68461611 -0.52657655 0.74938672
C 2.78163836 -0.42612907 -1.19030072
H 2.35082127 0.22496462 -1.94341475
H 3.86760205 -0.37533621 -1.26461265
H 2.45329574 -1.44599856 -1.38938136
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
O -0.68746490 -0.11174433 -0.01962547
H -1.04612154 0.77593821 0.01270684
H 0.27404252 0.02585065 -0.00349726
--
0 1
N 2.23397617 0.10318260 0.00585368
H 2.52934060 -0.44945538 -0.78893718
H 2.54405666 -0.40753849 0.82271317
C 2.89331145 1.41154656 -0.03438796
H 2.58276902 1.99327152 0.83012746
H 3.98462074 1.37225159 -0.04334363
H 2.56659917 1.94746403 -0.92221177
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
O -0.39201845 -0.38471874 0.07607132
H -0.91146085 0.41381204 0.17764877
H 0.52490382 -0.06848469 0.09051136
--
0 1
C 2.19770521 -2.24540349 -0.23031325
H 2.84766805 -3.10651537 -0.36322864
H 1.51672924 -2.16793143 -1.07417853
H 1.58468831 -2.38419948 0.65669511
C 2.95243729 -0.94739061 -0.09771974
O 2.37572184 0.12790424 0.05886900
N 4.30307041 -1.04489330 -0.16233771
H 4.70402204 -1.95542728 -0.29185281
C 5.17131253 0.10707716 -0.05289463
H 4.53481840 0.97537761 0.08188998
H 5.83690203 0.01562196 0.80319825
H 5.76577825 0.23649765 -0.95515382
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
O -0.63613493 -0.02328241 0.28059932
H 0.30809737 -0.04707875 0.07646369
C -1.15206541 -1.31128778 0.01525955
H -2.20994502 -1.29626539 0.26395586
H -1.05661024 -1.59267086 -1.03619061
H -0.67483575 -2.08627276 0.62051145
--
0 1
O 2.21041928 -0.12212177 -0.01210270
H 2.67920859 0.49226275 -0.58176865
C 2.71925320 0.03489717 1.30961462
H 2.16568412 -0.65329926 1.93974550
H 3.77824931 -0.21554173 1.36633776
H 2.56681356 1.04559122 1.68750717
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
O -0.70692019 0.04583037 0.00638610
H 0.26562361 0.07171014 0.00133929
C -1.07667067 -1.31391581 0.00161428
H -2.16292358 -1.36319577 0.00586542
H -0.72340594 -1.84465168 -0.88774350
H -0.71607978 -1.85282083 0.88307978
--
0 1
N 2.20127244 -0.03642087 -0.00333839
H 2.57189199 0.47135563 0.78979400
H 2.57201528 0.42791769 -0.82259722
C 2.67902438 -1.42245432 0.03412282
H 2.28713954 -1.95647960 -0.82806891
H 3.76573553 -1.52918949 0.03715731
H 2.28689798 -1.90918449 0.92375496
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
O -0.20877739 -0.21687067 -1.03240597
H 0.71112593 -0.38689175 -0.77396240
C -1.02217337 -0.74117114 -0.00545419
H -2.05749119 -0.53870733 -0.26859725
H -0.90774336 -1.82182632 0.10853710
H -0.82463111 -0.27549472 0.96464547
--
0 1
C 1.97349049 1.90322403 0.43230118
H 2.47988412 2.86467311 0.39743082
H 1.56294637 1.75708815 1.43017782
H 1.14384269 1.89371075 -0.26920435
C 2.88912087 0.74828521 0.11638497
O 2.46492608 -0.37162558 -0.16869657
N 4.21525779 1.01000949 0.17558433
H 4.51327024 1.92043762 0.47327152
C 5.19766382 -0.03010182 -0.04715949
H 4.84110663 -0.68103914 -0.83933645
H 6.13803306 0.42342202 -0.34567319
H 5.35717393 -0.63462872 0.84491605
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
O -0.78656202 0.04516844 -0.00718912
H 0.17770677 0.01269590 -0.00683539
C -1.24799094 -1.29028354 0.00108362
H -2.33427744 -1.25889710 0.00022120
H -0.92596575 -1.84976810 -0.88044538
H -0.92702783 -1.83846288 0.89007652
--
0 1
O 2.12888314 -0.05133660 -0.00474093
H 2.56808728 0.33681560 -0.76461362
H 2.56676744 0.35126768 0.74834860
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
N -0.89345122 -0.04384432 -0.04299745
H 0.09694826 -0.25605945 -0.07106993
H -1.36843879 -0.93339065 0.03383773
C -1.17578248 0.75790769 1.14523719
H -2.24162660 0.97221601 1.19502464
H -0.88078955 0.30424674 2.09720910
H -0.66300572 1.71432940 1.06080916
--
0 1
O 2.28445953 -0.04747650 0.02782522
H 2.56648565 0.32247227 -0.81203886
C 2.67037338 0.86410776 1.04726138
H 2.34719033 0.43447509 1.99032792
H 3.75142862 1.00319123 1.08630135
H 2.19189882 1.83770561 0.93208484
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
N -0.63864138 0.47091637 0.04456848
H 0.18995436 -0.11393716 -0.00577361
H -1.30046894 0.08125680 -0.61366848
C -1.19865882 0.39139858 1.39194660
H -2.09273777 1.00924471 1.45316749
H -1.46274551 -0.61584367 1.72945219
H -0.48027554 0.79867491 2.10108731
--
0 1
N 2.39889347 -0.45552115 0.19704452
H 2.69516214 -0.18098342 -0.73094072
H 3.02244314 -1.20321147 0.47223938
C 2.55912345 0.67968944 1.11071982
H 2.28893315 0.36499366 2.11637293
H 3.56653376 1.10146600 1.14769156
H 1.86658307 1.46546492 0.81806258
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
N -0.56970824 0.81437245 0.10109775
H 0.13087774 0.56141065 -0.58761455
H -1.46125215 0.52691480 -0.28042996
C -0.30551437 0.06571030 1.32879173
H -1.05714948 0.31427017 2.07595940
H -0.28802353 -1.02229248 1.21484626
H 0.66045772 0.36850913 1.73024224
--
0 1
C 2.25689155 2.69009990 -0.14932730
H 2.38151002 3.10127663 -1.14837163
H 2.76346292 3.33109245 0.56845722
H 1.19047979 2.66357037 0.06909413
C 2.76888324 1.27230222 -0.14703327
O 2.30890335 0.40656580 -0.88620788
N 3.75536621 0.99926987 0.74529744
H 4.15512723 1.75420265 1.27065019
C 4.34381155 -0.32032067 0.82279701
H 3.55563493 -1.06165082 0.72977641
H 5.06507133 -0.49231605 0.02425262
H 4.83846506 -0.43618886 1.78273654
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
N -0.53346397 -0.27959351 0.10699576
H -0.62915138 -1.24842455 0.38284867
H -1.12260363 -0.16615944 -0.70776410
C -1.01690943 0.58848610 1.18737346
H -0.91275967 1.62555174 0.87952116
H -2.05473726 0.41508213 1.47850360
H -0.38502338 0.44880090 2.06061419
--
0 1
O 2.09326841 0.91731136 0.21209725
H 1.27575101 0.42103887 0.03894435
H 2.67516986 0.65881349 -0.50364884
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C -0.84931672 -0.33949876 2.49171664
H 0.18434396 -0.01104732 2.41618542
H -0.88249791 -1.34205140 2.91270310
H -1.39080263 0.31687828 3.16842897
C -1.56403192 -0.35332311 1.15947545
O -2.74952638 -0.65153776 1.05676087
N -0.80165352 -0.02735461 0.08834167
H 0.16118756 0.24036035 0.21871364
C -1.38534986 -0.00235149 -1.23413683
H -1.89161720 -0.94280123 -1.44009631
H -2.11997230 0.79621180 -1.33087952
H -0.59464593 0.14957065 -1.96312772
--
0 1
O 2.13706570 0.25201737 0.45371880
H 2.85792051 0.87931700 0.54413361
C 2.65614986 -1.05334828 0.68760059
H 1.82357836 -1.74213597 0.58202402
H 3.42228862 -1.32234103 -0.03928018
H 3.06424691 -1.15479748 1.69323508
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
C -0.77857334 -0.46332064 2.49038768
H 0.22474462 -0.05095294 2.41348355
H -0.72247994 -1.48709180 2.85458464
H -1.35190757 0.11081693 3.21368365
C -1.52050259 -0.45662769 1.17232500
O -2.70083521 -0.78358573 1.08959682
N -0.79195361 -0.06964048 0.10058937
H 0.19411165 0.14570790 0.20292464
C -1.39779834 -0.05608245 -1.21131793
H -2.31492801 0.52889121 -1.19970991
H -0.69880422 0.38726130 -1.91536621
H -1.65298232 -1.06152895 -1.54543495
--
0 1
N 2.23828822 0.25457428 0.28251924
H 2.64195454 0.79449381 1.03771933
H 2.65629209 0.62195553 -0.56312668
C 2.61059106 -1.15660854 0.43627199
H 2.18430366 -1.72764112 -0.38510346
H 3.68598970 -1.34329798 0.46205539
H 2.17611849 -1.54101555 1.35610799
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C -0.70150294 -0.29062770 2.40688440
H -1.18329596 0.39564777 3.09887422
H 0.34956157 -0.03032157 2.30783303
H -0.79405685 -1.29160545 2.82403929
C -1.44854625 -0.24487664 1.09181530
O -2.66045000 -0.42847909 1.03434577
N -0.67005656 0.00591656 0.00977691
H 0.32667532 0.12256396 0.14159284
C -1.22705457 0.08979374 -1.31996754
H -2.29202426 -0.10650119 -1.24087756
H -1.07780169 1.07994030 -1.74854354
H -0.77662849 -0.64799919 -1.98337273
--
0 1
C 2.04177491 -2.35169797 0.68639761
H 2.59999972 -3.26170120 0.48048961
H 1.11308306 -2.35822742 0.12207220
H 1.78255599 -2.32825127 1.74333861
C 2.80941086 -1.09728593 0.35016088
O 2.26422421 0.00415088 0.29318848
N 4.13616907 -1.26609970 0.13641291
H 4.51249037 -2.19334539 0.21317023
C 5.02340725 -0.15963372 -0.15253563
H 4.40921487 0.73117605 -0.23235934
H 5.75082180 -0.02016799 0.64486768
H 5.54839755 -0.31961545 -1.09167796
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C -0.72430464 -0.70493582 2.28386786
H 0.33531828 -0.62994325 2.05318235
H -0.95169666 -1.71198961 2.62565146
H -0.96962784 -0.02207955 3.09376537
C -1.61493501 -0.38742925 1.10406897
O -2.83732387 -0.41502209 1.19413277
N -0.95342037 -0.07640442 -0.04081980
H 0.05380860 -0.07556651 -0.03664022
C -1.65812397 0.25009358 -1.25855306
H -2.72037197 0.17694444 -1.04665270
H -1.43030493 1.26296263 -1.58809384
H -1.40562611 -0.44433518 -2.05858358
--
0 1
O 2.10277707 -0.05840697 -0.15507669
H 2.66775436 -0.77136560 -0.46027609
H 2.68252869 0.70578659 -0.13117819
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
N -0.72999913 0.02276763 0.00091465
H 0.29842255 0.07400447 0.00162304
C -1.29682453 -1.24042682 0.00150234
O -0.59409886 -2.25351751 0.00263371
C -2.74362229 -1.26233170 0.00047938
H -3.24959045 -2.21183517 0.00083311
C -3.42201997 -0.09590921 -0.00092259
H -4.50089709 -0.04921603 -0.00174546
N -2.77483684 1.10540895 -0.00141807
H -3.28383807 1.97387739 -0.00248574
C -1.39147866 1.23701978 -0.00052538
O -0.83984371 2.31703528 -0.00100125
--
0 1
N 4.14382946 -1.08570382 0.00049928
H 4.59107325 -0.17913062 0.00088609
C 4.99987723 -2.20032161 -0.00100060
O 6.20932926 -2.04861719 -0.00174980
C 4.28565880 -3.46249515 -0.00150500
H 4.85224335 -4.37752590 -0.00264363
C 2.93548983 -3.46631302 -0.00054490
H 2.35852659 -4.37927779 -0.00086358
N 2.19749842 -2.31543218 0.00090551
H 1.17116216 -2.33687498 0.00158258
C 2.77026935 -1.07076714 0.00145616
O 2.11994847 -0.02954883 0.00269255
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
O -0.55283102 -0.10169749 -0.00049879
H -0.87175963 0.80179220 0.00014440
H 0.41265950 -0.00183225 -0.00025181
--
0 1
N 2.36402099 0.09662268 0.00014680
C 3.05992763 0.06265189 1.14489465
H 2.47525508 0.08626283 2.05576267
C 4.44895122 -0.00253054 1.19489071
H 4.95485760 -0.02738470 2.14921983
C 5.16011436 -0.03565634 -0.00002044
H 6.23995431 -0.08742989 -0.00010086
C 4.44880607 -0.00259720 -1.19482173
H 4.95460301 -0.02747022 -2.14922033
C 3.05977605 0.06259779 -1.14467547
H 2.47500717 0.08619845 -2.05546803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
O -0.62765177 0.08746727 0.00147128
H 0.34360203 0.12230333 -0.00060045
C -0.97793123 -1.27855601 0.00123841
H -2.06339209 -1.34204332 0.00500898
H -0.61488369 -1.80637584 -0.88538395
H -0.60864033 -1.80823682 0.88417273
--
0 1
N 2.27233665 0.01643230 -0.00162684
C 2.96870504 -0.00800303 -1.14634644
H 2.38422645 0.01522051 -2.05732188
C 4.35834211 -0.05774589 -1.19503169
H 4.86569445 -0.07503793 -2.14881442
C 5.06871533 -0.08345851 0.00058133
H 6.14905134 -0.12122326 0.00143063
C 4.35646788 -0.05843740 1.19512119
H 4.86226662 -0.07626173 2.14960688
C 2.96691424 -0.00868772 1.14416710
H 2.38090845 0.01398671 2.05428579
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -1.06170920 1.29714057 0.29206000
O -0.35816112 2.27045861 0.53181267
O -0.58930352 0.09491776 0.00378881
H 0.40443566 0.12772262 0.01841184
C -2.55842780 1.34254982 0.29625732
H -2.89599798 2.34746400 0.51831634
H -2.93288928 1.02239045 -0.67299555
H -2.93721196 0.64491043 1.03955708
--
0 1
C 2.78934845 1.10841924 0.27118376
O 2.08573008 0.13510475 0.03139616
O 2.31692211 2.31085463 0.55896223
H 1.32313357 2.27795640 0.54456172
C 4.28606090 1.06251650 0.26921936
H 4.62364046 0.06119730 0.03169387
H 4.66755944 1.77286944 -0.46024953
H 4.65757721 1.36521101 1.24527472
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C -1.30974974 1.18017617 -0.02517034
O -0.72530044 2.15514767 0.45271335
N -0.66562116 0.09505470 -0.49199449
H 0.35458266 0.05144817 -0.45930922
H -1.18362704 -0.67359969 -0.87075610
C -2.81671934 1.15599865 -0.11060597
H -3.22062895 1.26254146 0.89308239
H -3.20942754 0.24863402 -0.56190009
H -3.14315813 2.01659563 -0.68889311
--
0 1
C 2.77960183 1.06388568 0.13435724
O 2.19518007 0.08986525 -0.34537373
N 2.13551426 2.14862891 0.60220379
H 1.11540890 2.19306669 0.56790248
H 2.65353833 2.91659011 0.98232444
C 4.28660101 1.08817006 0.21958232
H 4.67847207 1.98781958 0.68676633
H 4.69015720 1.00062503 -0.78619798
H 4.61437977 0.21759516 0.78176266
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
C -1.11362611 1.32702009 0.27516705
O -0.46708264 2.34938778 0.46153746
O -0.57808939 0.13692049 0.04961747
H 0.41332036 0.20325661 0.05548711
C -2.61142469 1.28618957 0.27736131
H -3.00664872 2.27688545 0.46578983
H -2.96425623 0.91525868 -0.68200123
H -2.95311421 0.59179821 1.04124041
--
0 1
N 4.18869738 1.08795338 0.18288157
H 4.58190249 0.17256315 0.01116215
C 5.11022529 2.13606900 0.36433468
O 6.30737167 1.91777319 0.31145472
C 4.47115922 3.41553138 0.60494183
H 5.09069398 4.28245626 0.75641911
C 3.12407502 3.49552153 0.63432307
H 2.60123483 4.42396853 0.80962128
N 2.32034427 2.40483955 0.44391704
H 1.29629244 2.47478724 0.46770730
C 2.82027675 1.15461676 0.20974482
O 2.10824430 0.16511187 0.03627464
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
C -1.23272700 1.21163896 -0.14162406
O -0.57127667 2.24201573 0.02561679
N -0.67058051 0.00388878 -0.31428147
H 0.34384695 -0.09056011 -0.30832667
H -1.24421373 -0.80632370 -0.44668271
C -2.73824495 1.26675766 -0.15588657
H -3.07797534 1.64660511 0.80450159
H -3.20211503 0.30286549 -0.34621112
H -3.04998747 1.97549049 -0.91859737
--
0 1
N 4.19521289 1.11742864 -0.11954193
H 4.68524234 0.24147146 -0.23748040
C 4.99883890 2.26027358 0.03093977
O 6.21440093 2.16465126 0.01575499
C 4.22624673 3.47559007 0.19408371
H 4.74800972 4.40878293 0.31711883
C 2.87708602 3.41391454 0.18840695
H 2.25668197 4.29027492 0.30608385
N 2.19200391 2.24163303 0.03384119
H 1.15921343 2.23257196 0.03300387
C 2.82289388 1.03716353 -0.12841885
O 2.22570515 -0.02675243 -0.27022634
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
C 0.71264532 1.12099570 0.06054078
H 1.35784165 1.98639917 0.12773717
C 1.25823573 -0.15925190 0.12423352
H 2.32495428 -0.28709988 0.24674303
C 0.42688496 -1.27452666 0.04265043
H 0.85044465 -2.26843268 0.09474995
C -0.94957784 -1.11007406 -0.10031360
H -1.59445570 -1.97627370 -0.16371348
C -1.49552564 0.17105056 -0.16154602
H -2.56378279 0.29922115 -0.27370311
C -0.66382760 1.28664289 -0.08340143
H -1.08690070 2.28100020 -0.13288613
--
0 1
C 1.98776046 1.10975720 3.71031958
H 2.63260558 1.97594094 3.77407030
C 2.53371358 -0.17139390 3.77183931
H 3.60192047 -0.29954095 3.88458353
C 1.70206410 -1.28699400 3.69318889
H 2.12514581 -2.28134643 3.74284255
C 0.32566254 -1.12135897 3.54847214
H -0.31944006 -1.98676921 3.48083951
C -0.21989733 0.15887378 3.48450631
H -1.28652536 0.28670299 3.36132755
C 0.61137962 1.27415454 3.56657725
H 0.18785474 2.26805957 3.51420832
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '25')] = qcdb.Molecule("""
0 1
N 1.57248145 0.25454916 -0.25648131
C 0.96935990 -0.90316032 0.04452614
H 1.61363891 -1.77218120 0.10234520
C -0.39815811 -1.02881911 0.28096043
H -0.81842477 -1.99173710 0.53356364
C -1.19580525 0.10655779 0.19539732
H -2.26068964 0.04953865 0.37344280
C -0.58712829 1.31741239 -0.12010544
H -1.16181223 2.22950003 -0.20046257
C 0.78854733 1.33970567 -0.33224053
H 1.28843202 2.26879436 -0.57852690
--
0 1
N -0.53372327 -1.51586163 3.84414371
C -1.46620136 -0.55523217 3.91799487
H -2.46899061 -0.88618697 4.16018773
C -1.20419832 0.79583625 3.70861549
H -2.00275608 1.52034169 3.78688658
C 0.09522901 1.18507754 3.39834708
H 0.33721357 2.22407602 3.22247582
C 1.07478832 0.20217938 3.31498561
H 2.09708956 0.44892512 3.06654863
C 0.71230860 -1.12295838 3.54817861
H 1.45616936 -1.90851301 3.49173001
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '26')] = qcdb.Molecule("""
0 1
N 1.37690111 0.83974747 0.73462494
H 1.05181240 1.38622385 1.52335563
C 1.30898271 1.45752981 -0.52065500
O 0.92056136 2.61107777 -0.62597673
N 2.01142293 -1.21320830 -0.09807182
H 1.72728551 0.99084268 -2.61199556
C 2.02573687 -0.69717123 -1.36439740
H 2.29751698 -1.39106004 -2.14564531
C 1.71451235 0.59193780 -1.61248722
H 2.12945422 -2.20152091 0.05682913
C 1.64594503 -0.48520598 1.01871830
O 1.56111602 -0.97181638 2.12980905
--
0 1
N -1.35546089 -0.83604594 0.73462494
H -1.03037218 -1.38252232 1.52335563
C -1.28754249 -1.45382828 -0.52065500
O -0.89912114 -2.60737623 -0.62597673
N -1.98998271 1.21690983 -0.09807182
H -1.70584529 -0.98714115 -2.61199556
C -2.00429665 0.70087276 -1.36439740
H -2.27607676 1.39476157 -2.14564531
C -1.69307213 -0.58823627 -1.61248722
H -2.10801399 2.20522244 0.05682913
C -1.62450481 0.48890751 1.01871830
O -1.53967580 0.97551791 2.12980905
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '27')] = qcdb.Molecule("""
0 1
C 0.81874699 0.86417234 0.18828612
H 1.46611361 1.71666767 0.34472141
C 1.36899712 -0.39052394 -0.06669818
H 2.44303637 -0.51186194 -0.11057444
C 0.53437860 -1.48849320 -0.27188804
H 0.96084825 -2.46156422 -0.47550749
C -0.84911561 -1.33050735 -0.21989643
H -1.49706942 -2.18186028 -0.37955321
C -1.39948546 -0.07603020 0.04043417
H -2.47268667 0.04490778 0.09338206
C -0.56529230 1.02140336 0.24227921
H -0.99255667 1.99366131 0.44625817
--
0 1
N -2.39843199 0.16214088 3.52041137
C -1.78354606 1.31980869 3.80047556
H -2.43115011 2.17298014 3.96298765
C -0.40133116 1.46065642 3.89064637
H 0.03051760 2.42430654 4.12186267
C 0.39962023 0.34367712 3.67643246
H 1.47718940 0.41406140 3.73126697
C -0.22093167 -0.86497792 3.38277288
H 0.35484284 -1.76059980 3.19869795
C -1.61144595 -0.90301580 3.31732347
H -2.12029887 -1.83146918 3.08848079
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '28')] = qcdb.Molecule("""
0 1
C 0.82576911 1.23652484 -0.04025044
H 1.52101317 2.06312520 -0.08247145
C 1.30015992 -0.06294088 0.12725601
H 2.36365753 -0.24226113 0.20767420
C 0.40352312 -1.12855218 0.19824486
H 0.77375338 -2.13742677 0.32412109
C -0.96780949 -0.89519049 0.10313994
H -1.66520900 -1.71998342 0.16042745
C -1.44350838 0.40448328 -0.06244130
H -2.50751124 0.58550112 -0.12415016
C -0.54575549 1.46876875 -0.13624741
H -0.91422190 2.47742220 -0.26785516
--
0 1
N -0.27488064 0.67158742 3.21864568
H -0.64818803 1.57334885 2.95575271
C 1.11726604 0.59860052 3.35065902
O 1.80817636 1.59302421 3.20582496
C 1.59616616 -0.73547719 3.66876922
H 2.65321825 -0.88769313 3.80289036
C 0.71645693 -1.74985837 3.79498575
H 1.02238445 -2.75827898 4.03151011
N -0.62878896 -1.56482645 3.62489361
H -1.27753679 -2.32738539 3.72376278
C -1.20323727 -0.34002542 3.32547899
O -2.40102568 -0.18920215 3.18336680
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '29')] = qcdb.Molecule("""
0 1
N 1.21075533 0.02867578 0.32971111
C 0.61193497 -1.15844901 0.15345176
H 1.25147791 -2.02952340 0.21929295
C -0.75131399 -1.30864956 -0.08883407
H -1.17041577 -2.29686932 -0.21338320
C -1.54786767 -0.16994027 -0.15646691
H -2.61101275 -0.24595469 -0.33875574
C -0.94362237 1.07063612 0.01982310
H -1.51881431 1.98450028 -0.01164403
C 0.42771857 1.11610863 0.25734879
H 0.92469451 2.06805173 0.39754798
--
0 1
N -0.71316758 -0.28394932 3.29752332
H -1.60805660 -0.71581281 3.11291983
C -0.71291270 1.11386048 3.39053432
O -1.75279577 1.74206028 3.27568419
C 0.60658206 1.67294182 3.61809739
H 0.70789842 2.74016399 3.71396557
C 1.67645565 0.85424952 3.68961744
H 2.68033469 1.22291422 3.83804398
N 1.55839451 -0.50304375 3.57706278
H 2.37183050 -1.09523110 3.56889514
C 0.35794757 -1.15027617 3.35068108
O 0.26581032 -2.35569425 3.21710180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '30')] = qcdb.Molecule("""
0 1
C 0.83551718 1.11516693 0.02140131
H 1.48432398 1.98060858 0.01953430
C 1.38327497 -0.16614721 0.02376531
H 2.45714902 -0.29520468 0.02277108
C 0.54755466 -1.28131632 0.02168563
H 0.97293610 -2.27580453 0.01977853
C -0.83552313 -1.11516159 0.02139907
H -1.48433419 -1.98060640 0.01953009
C -1.38328358 0.16615413 0.02375775
H -2.45715618 0.29520906 0.02275707
C -0.54756577 1.28132347 0.02168025
H -0.97294284 2.27580548 0.01976873
--
0 1
C 0.65578060 -0.11679048 3.53075174
H 1.04724138 -1.12390931 3.52628348
H 1.37085438 0.69327350 3.52625015
C -0.65577592 0.11679215 3.53076063
H -1.37084787 -0.69327237 3.52626454
H -1.04723903 1.12391105 3.52630243
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '31')] = qcdb.Molecule("""
0 1
N -0.05087365 -0.98008127 0.03396219
H -0.05322205 -1.99069374 0.04982167
C -1.30881316 -0.36187638 0.00402596
O -2.32722000 -1.03255492 -0.00582886
C -1.23681849 1.08804829 -0.01222440
H -2.15273897 1.65146044 -0.05477443
C -0.03519433 1.69783584 0.03370483
H 0.07036636 2.77247575 0.03188224
N 1.13452913 0.99028251 0.09184461
H 2.02372032 1.45677218 0.15569277
C 1.19318599 -0.39183287 0.11577512
O 2.23639797 -1.01118826 0.19418562
--
0 1
C 0.72600726 0.02505349 3.39819044
H 1.24312499 -0.84593440 3.02096384
H 1.33161826 0.81204754 3.82550477
C -0.60276924 0.12564394 3.34894351
H -1.21477213 -0.66183565 2.93204279
H -1.11459423 0.99671353 3.73294327
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '32')] = qcdb.Molecule("""
0 1
N -0.05545357 -0.94799090 0.01001028
H -0.05731609 -1.95771330 0.05505287
C -1.31395971 -0.33514498 -0.06458622
O -2.32889664 -1.00790087 -0.12310273
C -1.24835877 1.11605191 -0.06650860
H -2.16434937 1.67533298 -0.14710244
C -0.05308010 1.73142748 0.03419541
H 0.04811054 2.80642986 0.04341968
N 1.11592628 1.02759107 0.13516893
H 1.99665515 1.49727976 0.26162029
C 1.17534700 -0.35380470 0.17616616
O 2.21463146 -0.96646542 0.33517250
--
0 1
C 0.70785184 -0.17230221 3.27635136
H 1.70367011 -0.52628807 3.16213263
C -0.43675225 0.21415547 3.38254320
H -1.44163480 0.54285582 3.48290737
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '33')] = qcdb.Molecule("""
0 1
N 1.38138219 -0.00023348 0.13146374
C 0.67935079 -1.14023946 0.09207966
H 1.25871960 -2.05496223 0.12588361
C -0.70972232 -1.19311407 0.00666426
H -1.21408768 -2.14856163 -0.02530851
C -1.42161357 0.00013343 -0.04081690
H -2.50069615 0.00025757 -0.10916973
C -0.70940120 1.19317538 0.00652198
H -1.21351163 2.14874784 -0.02552831
C 0.67965167 1.13995623 0.09189303
H 1.25926073 2.05451090 0.12550248
--
0 1
C 0.01960458 0.66643934 3.48727228
H 0.93007858 1.22592506 3.32815744
H -0.88994292 1.22884357 3.64423278
C 0.01993726 -0.66624796 3.48740452
H 0.93067296 -1.22533044 3.32839408
H -0.88935083 -1.22907273 3.64449367
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '34')] = qcdb.Molecule("""
0 1
C -2.53330865 -0.29487907 0.71314876
H -2.56362682 -0.97708181 -0.13642264
H -2.56697835 -0.89587590 1.62173177
H -3.43442611 0.31595713 0.68410447
C -1.27188487 0.55765547 0.67435468
H -1.27102630 1.25656571 1.51431940
H -1.26663255 1.16789581 -0.23182653
C -0.00013504 -0.27841822 0.71960315
H -0.00015938 -0.88722952 1.62863709
H -0.00036543 -0.98071418 -0.11940439
C 1.27189476 0.55738219 0.67406108
H 1.27097175 1.25663331 1.51370541
H 1.26663649 1.16718250 -0.23238692
C 2.53340376 -0.29494176 0.71328015
H 2.56391919 -0.97777410 -0.13577836
H 3.43430956 0.31625432 0.68359945
H 2.56755821 -0.89520887 1.62232865
--
0 1
C 2.53355730 0.29502133 4.51309986
H 2.56814179 0.89482803 3.60377431
H 2.56406061 0.97822791 5.36184468
H 3.43423799 -0.31647598 4.54330880
C 1.27173110 -0.55686594 4.55240411
H 1.26628739 -1.16659365 5.45890107
H 1.27060059 -1.25621968 3.71282305
C -0.00004389 0.27923316 4.50678767
H -0.00019882 0.98154314 5.34577214
H 0.00003301 0.88800958 3.59771803
C -1.27180473 -0.55690882 4.55205921
H -1.26642249 -1.16701827 5.45830931
H -1.27069839 -1.25593171 3.71219555
C -2.53352396 0.29513749 4.51308150
H -2.56771726 0.89567116 3.60420474
H -3.43432593 -0.31616087 4.54259468
H -2.56406349 0.97772373 5.36234289
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '35')] = qcdb.Molecule("""
0 1
C -2.53038287 -0.41757533 0.68130643
H -2.55988603 -0.98278998 -0.25015619
H -2.55403625 -1.13386495 1.50265790
H -3.43621355 0.18414376 0.73677133
C -1.27615683 0.44363493 0.75002483
H -1.27808384 1.02521785 1.67508548
H -1.28033899 1.16855564 -0.06715806
C 0.00220470 -0.38071620 0.67899257
H 0.00782894 -1.11141304 1.49383122
H 0.00624866 -0.96052270 -0.24882046
C 1.26833347 0.46239635 0.74936913
H 1.26201986 1.04425029 1.67424645
H 1.26163488 1.18705711 -0.06803458
C 2.53496627 -0.38042469 0.68068636
H 2.57244024 -0.94571652 -0.25045186
H 3.43198117 0.23441492 0.73557772
H 2.56920771 -1.09581003 1.50245608
--
0 1
C -0.00052120 0.06397129 5.24130633
C 0.00055054 -0.07615981 6.76103928
H -0.88648549 0.38791623 7.19440870
H 0.00980204 -1.12694006 7.05404915
H 0.87921076 0.40350475 7.19468235
C -1.23997654 -0.61768074 4.66740782
H -1.26327576 -0.52872361 3.58057863
H -1.25206217 -1.67895713 4.92042102
H -2.15092026 -0.16538948 5.06249294
C 1.25208391 -0.59356951 4.66783599
H 1.27341069 -0.50528385 3.58086503
H 1.28521444 -1.65413035 4.92192831
H 2.15389614 -0.12292620 5.06225711
C -0.01476908 1.54376378 4.86668505
H 0.86299692 2.05435080 5.26564018
H -0.01529328 1.67021871 3.78303336
H -0.90287503 2.03709750 5.26447319
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '36')] = qcdb.Molecule("""
0 1
C 0.38252221 -0.07060697 0.76689582
C -1.04063947 0.39681125 1.06093593
H -1.77157460 -0.28150025 0.61833023
H -1.22471777 0.43573509 2.13551890
H -1.21406603 1.39372444 0.65309065
C 0.59084747 -1.46681814 1.34797791
H 1.60291380 -1.82295000 1.15010285
H 0.43896858 -1.46674598 2.42828668
H -0.10991906 -2.17868425 0.90931390
C 1.37826905 0.89843536 1.39914944
H 2.40439397 0.58544074 1.20073365
H 1.24378092 0.94597430 2.48070991
H 1.24837318 1.90502262 0.99895071
C 0.60196094 -0.11103419 -0.74309659
H 0.45921182 0.87703910 -1.18289819
H 1.61369399 -0.44345945 -0.97967210
H -0.09953078 -0.79754982 -1.21922069
--
0 1
C -0.37502842 0.06931363 5.96648833
C 1.04778403 -0.39965237 5.67308879
H 1.23222323 -0.43898152 4.59856833
H 1.77921818 0.27802046 6.11582437
H 1.22004770 -1.39665841 6.08120936
C -0.58142523 1.46587516 5.38565786
H -1.59338833 1.82286061 5.58250538
H 0.11949337 2.17694663 5.82537963
H -0.42831602 1.46607177 4.30551550
C -0.59532291 0.10948985 7.47634196
H -1.60653907 0.44376683 7.71241515
H 0.10718954 0.79443888 7.95318018
H -0.45475982 -0.87903049 7.91579370
C -1.37149114 -0.89846403 5.33334194
H -1.24256513 -1.90543941 5.73292091
H -2.39738024 -0.58469117 5.53172979
H -1.23678678 -0.94543842 4.25176527
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '37')] = qcdb.Molecule("""
0 1
C 0.79991408 -1.02205164 0.68773696
H 0.85355588 -1.12205101 -0.39801435
H 1.49140210 -1.74416936 1.11972040
C 1.11688700 0.42495279 1.09966205
H 1.83814230 0.89014504 0.43045256
H 1.55556959 0.43982464 2.09708356
C -0.24455916 1.16568959 1.10297714
H -0.25807760 2.00086313 0.40532333
H -0.44880450 1.57699582 2.09098447
C -1.29871418 0.10381191 0.73930899
H -1.47356078 0.10524338 -0.33800545
H -2.25673428 0.27804118 1.22715843
C -0.64687993 -1.22006836 1.13630660
H -1.12443918 -2.08762702 0.68299327
H -0.68601864 -1.34528332 2.22022006
--
0 1
C 0.04984615 0.09420760 5.61627735
C -0.04649805 -0.05787837 7.13191782
H 0.94604832 -0.07334458 7.58427505
H -0.60542282 0.77000613 7.57035274
H -0.55366275 -0.98654445 7.39726741
C 0.76389939 1.40111272 5.28065247
H 0.84541894 1.53461185 4.20097059
H 0.22042700 2.25580115 5.68615385
H 1.77150393 1.41176313 5.69888547
C -1.35516567 0.11403225 5.01895782
H -1.31823408 0.23122219 3.93510886
H -1.93746520 0.94145581 5.42730374
H -1.88506873 -0.81375459 5.24028712
C 0.83774596 -1.07927730 5.03893917
H 0.34252564 -2.02626804 5.25918232
H 0.93258913 -0.99209454 3.95580439
H 1.84246405 -1.11668194 5.46268763
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '38')] = qcdb.Molecule("""
0 1
C 0.95688019 -0.89184563 1.14195000
H 1.50456597 -1.27835762 0.28342019
H 1.42138447 -1.31477793 2.03102546
C 0.99094943 0.65850830 1.14550384
H 1.51059446 1.02309646 0.25994788
H 1.51625823 1.05981813 2.01053703
C -0.47945194 1.10231879 1.10387910
H -0.61626861 2.06487722 0.61356737
H -0.87474223 1.18907144 2.11806960
C -1.18210650 -0.05279656 0.39334575
H -0.94888216 -0.02683030 -0.67380459
H -2.26566452 -0.03356474 0.50127403
C -0.53065958 -1.27488954 1.03930959
H -0.69039061 -2.19702093 0.48299221
H -0.95084939 -1.41541197 2.03674782
--
0 1
C -1.13198517 -0.38391856 5.05596626
H -1.46511966 -0.14721994 4.04338190
H -1.93677357 -0.92701702 5.54895277
C 0.18162128 -1.17946347 5.00820507
H 0.23156623 -1.83720616 4.14207124
H 0.26190891 -1.81082110 5.89259036
C 1.31093651 -0.11675764 5.00880116
H 1.93220146 -0.17743649 4.11692754
H 1.96834600 -0.26664069 5.86420633
C 0.60076314 1.24491110 5.11666799
H 0.42089996 1.65340289 4.12066887
H 1.18114710 1.97931461 5.67264126
C -0.74128932 0.91043867 5.76647985
H -1.48095789 1.70295043 5.66159855
H -0.60124939 0.71879862 6.83302881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '39')] = qcdb.Molecule("""
0 1
C 0.76554546 0.86824433 0.82099095
H 1.43747647 1.68000664 1.06510281
C 1.23765260 -0.44283807 0.79388795
H 2.27575877 -0.64853808 1.01771141
C 0.37223723 -1.48853667 0.47726862
H 0.73818789 -2.50608012 0.45705609
C -0.96493318 -1.22297162 0.18687834
H -1.63645949 -2.03456079 -0.05777362
C -1.43706509 0.08840558 0.21327714
H -2.47468432 0.29430216 -0.01146746
C -0.57190649 1.13402416 0.53081281
H -0.93769935 2.15171058 0.55107764
--
0 1
C -0.76345318 -0.72677383 4.05982770
H -0.86970702 -0.55182467 2.98752083
H -1.41509075 -1.55603772 4.33297836
C 0.70608801 -0.98383692 4.40395757
H 1.20131879 -1.62142197 3.67337330
H 0.76936719 -1.48405069 5.37142421
C 1.34622506 0.42155976 4.49491043
H 1.99649337 0.61423069 3.64305751
H 1.95909224 0.51072918 5.39063579
C 0.16717893 1.42073677 4.52178247
H 0.05002744 1.87970717 3.53949713
H 0.31277252 2.22224160 5.24418107
C -1.06659283 0.56364158 4.81743133
H -1.99758134 1.03937903 4.51151819
H -1.13201859 0.35432067 5.88796657
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '40')] = qcdb.Molecule("""
0 1
C 0.31195353 0.56102334 0.49669886
H 0.74213608 1.55336911 0.48156571
C 1.14218235 -0.55807461 0.53606185
H 2.21651131 -0.43425014 0.55235015
C 0.58780415 -1.83668705 0.55414435
H 1.23191239 -2.70484153 0.58522179
C -0.79665772 -1.99637562 0.53296300
H -1.22677442 -2.98844427 0.54863708
C -1.62689297 -0.87747365 0.49416828
H -2.70112211 -1.00134997 0.47981498
C -1.07266525 0.40120590 0.47597397
H -1.71697357 1.26940117 0.44591995
--
0 1
C 0.17046797 0.50613197 4.83469402
C 1.61671665 0.68491933 4.37973254
H 2.03257337 1.61819721 4.76315552
H 2.24011597 -0.13569629 4.73858640
H 1.67732578 0.70431062 3.29079832
C 0.11607660 0.47476083 6.35955934
H -0.90971343 0.34734041 6.70864711
H 0.71148250 -0.35092603 6.75211308
H 0.50437108 1.40264546 6.78246492
C -0.37891207 -0.80336000 4.27439800
H -1.41378567 -0.95363504 4.58706959
H 0.20754451 -1.65233376 4.63020927
H -0.35013224 -0.80381278 3.18408376
C -0.67090481 1.67070366 4.31848855
H -0.64936386 1.70673405 3.22848999
H -1.71069396 1.56693409 4.63297103
H -0.29525222 2.62139813 4.70059546
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '41')] = qcdb.Molecule("""
0 1
N -0.20890478 -0.96458262 0.53476104
H -0.22415099 -1.97310940 0.60508386
C -1.44634208 -0.34458112 0.30665858
O -2.46123675 -1.01079161 0.19789196
C -1.35778219 1.10318559 0.22814378
H -2.25657214 1.66773071 0.04984731
C -0.16300320 1.70989257 0.38112632
H -0.04629046 2.78244591 0.33334968
N 0.98545210 1.00082412 0.61120636
H 1.86755978 1.46692777 0.74478430
C 1.02702092 -0.37917011 0.71264723
O 2.04919670 -0.99739548 0.93725979
--
0 1
C 1.14141247 2.35703152 4.05707817
H 0.71056385 2.66808022 3.10429560
H 0.50717856 2.76246464 4.84532582
H 2.12429249 2.81747894 4.15019966
C 1.21442893 0.83816057 4.14659651
H 1.64481257 0.54859772 5.10788747
H 1.88901852 0.44700002 3.38147835
C -0.15035626 0.17999392 3.99177975
H -0.82160052 0.54886973 4.77339899
H -0.59782713 0.49025894 3.04187953
C -0.09406732 -1.34069263 4.05141525
H 0.32953817 -1.64312304 5.01205144
H 0.59745442 -1.70257157 3.28691282
C -1.46335024 -1.98256584 3.86764160
H -1.90172924 -1.70910816 2.90745609
H -1.40641145 -3.06933423 3.91169879
H -2.15131302 -1.65421986 4.64687465
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '42')] = qcdb.Molecule("""
0 1
N 0.19572959 -0.84468925 0.82384642
H 0.45039753 -1.79675294 1.04976794
C -1.17904919 -0.57368440 0.75948349
O -1.99364624 -1.45626526 0.96690066
C -1.47671471 0.81115567 0.43755952
H -2.50635592 1.11565059 0.36389469
C -0.46811280 1.68296245 0.23489084
H -0.63843522 2.72164296 -0.00616410
N 0.84562854 1.30599113 0.32683051
H 1.58969256 1.96887924 0.18595979
C 1.25426147 0.01946187 0.63624397
O 2.42230438 -0.30171639 0.73187948
--
0 1
C 1.05672314 -0.86351031 4.39874366
H 1.51057565 -0.95556655 3.41076111
H 1.60122564 -1.52749058 5.06794134
C 1.11103661 0.60244169 4.83167965
H 2.06932660 1.07534062 4.62095536
H 0.92292133 0.68407923 5.90490278
C -0.05631497 1.21525617 4.06090845
H 0.21798930 1.30403777 3.00743682
H -0.34072939 2.20639729 4.41254246
C -1.17325946 0.17768426 4.23193676
H -1.89879874 0.20129811 3.42056485
H -1.71734509 0.38238141 5.15418538
C -0.45022312 -1.18886357 4.33559365
H -0.69288766 -1.83301970 3.49223397
H -0.76532935 -1.71626599 5.23468007
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '43')] = qcdb.Molecule("""
0 1
N 0.62608128 -0.85091265 0.80591569
H 0.40918989 -1.81150056 1.03440142
C -0.43245619 -0.08733581 0.29466376
O -1.53077162 -0.58840313 0.12359257
C -0.06687462 1.29127521 0.01963739
H -0.80974352 1.95181039 -0.39283965
C 1.18354208 1.71793501 0.29053321
H 1.50185022 2.73387064 0.10983284
N 2.13412979 0.88660160 0.81908177
H 3.05533594 1.22390137 1.04342778
C 1.90278319 -0.44317844 1.12831175
O 2.74380631 -1.16392354 1.62858730
--
0 1
C -0.62370220 -0.02971796 4.73188916
C -1.94044838 0.71157084 4.94676206
H -2.64751979 0.09336465 5.50162440
H -1.78094882 1.63175538 5.51094708
H -2.39815816 0.97306786 3.99160840
C -0.00826558 -0.38315588 6.08316660
H 0.93489659 -0.91552919 5.95238477
H 0.18875537 0.51658585 6.66796874
H -0.67955960 -1.02089289 6.65990335
C 0.34142207 0.86375986 3.95610006
H 1.28999256 0.35116515 3.78574607
H 0.54671227 1.78189631 4.50952643
H -0.08097331 1.14224647 2.98863562
C -0.88501939 -1.30975236 3.94152426
H -1.34875779 -1.08791865 2.97889962
H 0.04755691 -1.84815128 3.76188758
H -1.55552720 -1.97156632 4.49170918
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '44')] = qcdb.Molecule("""
0 1
C 0.66640038 0.18381078 0.41973683
H 1.22888182 -0.32988301 1.18625971
H 1.22803556 0.69720813 -0.34760989
C -0.66597358 0.18297343 0.41961191
H -1.22792171 -0.33149890 1.18610334
H -1.22818427 0.69564575 -0.34774808
--
0 1
C -2.53275995 -0.39365922 4.14534248
H -2.56225339 -1.00668000 3.24415261
H -2.56889390 -1.06787984 5.00095950
H -3.43393131 0.21735721 4.16258843
C -1.27132347 0.45901620 4.18116042
H -1.27172933 1.07910977 5.08055437
H -1.26293512 1.14592451 3.33210001
C -0.00004920 -0.37854138 4.15421721
H -0.00020326 -1.06521408 5.00604923
H 0.00009186 -1.00611921 3.25757472
C 1.27117120 0.45904505 4.18162175
H 1.27144420 1.07885580 5.08110716
H 1.26297638 1.14611970 3.33271412
C 2.53262258 -0.39367946 4.14579757
H 2.56224605 -1.00653596 3.24448839
H 3.43380069 0.21725671 4.16337561
H 2.56854094 -1.06813554 5.00130328
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '45')] = qcdb.Molecule("""
0 1
C -0.60618936 0.05587406 0.58900491
H -1.66803667 0.05577624 0.58901162
C 0.60584873 0.05554087 0.58926624
H 1.66767817 0.05486328 0.58972794
--
0 1
C -2.53040391 -0.34745600 4.21851416
H -2.53877054 -1.00940954 3.35210357
H -2.58232224 -0.97372522 5.10910493
H -3.43281853 0.26144806 4.18575253
C -1.26987178 0.50714472 4.22958343
H -1.28652345 1.18014394 5.08999255
H -1.24460479 1.14136072 3.34078732
C 0.00004684 -0.33118629 4.27003876
H 0.00004957 -0.94897593 5.17310016
H 0.00011393 -1.01948544 3.42079757
C 1.26994540 0.50718978 4.22967030
H 1.28657322 1.18015690 5.09009161
H 1.24480048 1.14136210 3.34086911
C 2.53046789 -0.34744680 4.21872389
H 2.53884766 -1.00942955 3.35234481
H 3.43284666 0.26148455 4.18599753
H 2.58228512 -0.97366153 5.10935743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '46')] = qcdb.Molecule("""
0 1
C 1.37219093 1.01247736 0.97082468
H 0.95217623 2.01404955 1.03311725
H 1.94742170 0.92651560 0.05071776
H 2.05170208 0.85182517 1.80295247
C 0.32673706 -0.07764727 0.98819876
O 0.61882128 -1.25248130 1.17128126
N -0.95002884 0.34488680 0.77391491
H -1.10467156 1.32202550 0.60611216
C -2.05985440 -0.57736895 0.68015349
H -1.66935602 -1.56679601 0.89718425
H -2.83459176 -0.33138032 1.40366139
H -2.49097050 -0.57892483 -0.31993926
--
0 1
C 2.66066552 0.46274539 4.85334645
H 2.77750480 1.21716129 4.07460163
H 2.57455515 0.98763172 5.80500251
H 3.57275696 -0.13149652 4.88015446
C 1.43239329 -0.40064212 4.59579490
H 1.33782394 -1.14609612 5.38884574
H 1.54881342 -0.95410645 3.66195110
C 0.14985545 0.41797183 4.53049355
H 0.03828513 0.99570671 5.45357719
H 0.22908959 1.15078674 3.72084090
C -1.09450084 -0.43236340 4.31361365
H -1.18530281 -1.14684989 5.13503088
H -0.96669384 -1.02130113 3.40339920
C -2.36133934 0.40792810 4.22349893
H -2.29442610 1.11497908 3.39572969
H -3.24668156 -0.20808939 4.06966602
H -2.51169538 0.98413919 5.13671852
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '47')] = qcdb.Molecule("""
0 1
C 0.72918867 1.11310122 0.32672825
H 1.30321590 2.01422234 0.15916027
C 1.37508737 -0.11936635 0.41277695
H 2.45051474 -0.17462400 0.31330720
C 0.63503981 -1.28055339 0.62938541
H 1.13633448 -2.23601747 0.70021716
C -0.75098563 -1.20965430 0.75789034
H -1.32452590 -2.11141283 0.92419891
C -1.39703443 0.02267081 0.67308963
H -2.47242537 0.07848826 0.77399799
C -0.65689731 1.18429622 0.45833859
H -1.15782845 2.14058713 0.39509608
--
0 1
C 0.15810619 0.15289032 4.08588285
H 0.28023260 0.37837378 3.03545641
C -0.93297537 -0.60200829 4.51321912
H -1.65347990 -0.95852255 3.78952470
C -1.09367536 -0.89613361 5.86616918
H -1.94078294 -1.48210218 6.19641672
C -0.16179279 -0.43508023 6.79466326
H -0.28568629 -0.66304639 7.84467076
C 0.92979230 0.32002182 6.36942298
H 1.65291139 0.67785500 7.08980563
C 1.08859620 0.61350684 5.01593166
H 1.93585412 1.19958163 4.68588434
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '48')] = qcdb.Molecule("""
0 1
N 1.32276272 -0.01037598 1.01918373
C 0.65128601 -1.14899203 0.79680119
H 1.20041842 -2.06552808 0.97367282
C -0.67268130 -1.19471172 0.36665693
H -1.15719362 -2.14732141 0.20646407
C -1.34719676 0.00313399 0.15214401
H -2.37535653 0.00840542 -0.18229302
C -0.66455797 1.19409062 0.37900199
H -1.14262633 2.15155765 0.22872051
C 0.65889576 1.13497854 0.80885987
H 1.21410272 2.04591045 0.99543831
--
0 1
N 0.45011507 0.00130104 6.78095972
C 1.32078309 -0.00431175 5.76154669
H 2.36863966 -0.00306323 6.03584948
C 0.94739735 -0.01137951 4.41971862
H 1.69485802 -0.01554353 3.63861897
C -0.40865120 -0.01279358 4.10730315
H -0.73837988 -0.01824905 3.07702170
C -1.32675447 -0.00707849 5.15247277
H -2.39120450 -0.00792788 4.96373698
C -0.85115066 -0.00016084 6.46143162
H -1.54333433 0.00442229 7.29462282
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '49')] = qcdb.Molecule("""
0 1
C 0.84507720 1.05791869 0.69945490
H 1.50640601 1.90322178 0.83338235
C 1.37550931 -0.21745534 0.51116093
H 2.44718367 -0.36147258 0.50285232
C 0.52406810 -1.30704432 0.33319233
H 0.93572726 -2.29602641 0.18492305
C -0.85771573 -1.12146341 0.34638409
H -1.51838119 -1.96645805 0.20836325
C -1.38804570 0.15363438 0.53761349
H -2.45971752 0.29741587 0.55003229
C -0.53661315 1.24342221 0.71273882
H -0.94892427 2.23280628 0.85736635
--
0 1
N 0.02311730 0.35202455 6.77454464
C 0.17780112 1.28998616 5.82966776
H 0.31957195 2.30251216 6.18756949
C 0.16359185 1.02269639 4.46316833
H 0.29383191 1.82372219 3.74928292
C -0.02074646 -0.28893329 4.03787790
H -0.03731291 -0.53205196 2.98452996
C -0.18259538 -1.27396762 5.00673698
H -0.32913840 -2.30917859 4.73196547
C -0.15339291 -0.90663452 6.34982649
H -0.27698904 -1.65414849 7.12392749
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '50')] = qcdb.Molecule("""
0 1
C 0.83661195 1.11485600 0.23100790
H 1.48545250 1.97968049 0.21470491
C 1.38418781 -0.16696533 0.26005688
H 2.45768419 -0.29628753 0.26605977
C 0.54747934 -1.28184652 0.28693051
H 0.97191784 -2.27597918 0.31387670
C -0.83666710 -1.11500365 0.28456279
H -1.48555353 -1.97956851 0.30969784
C -1.38416274 0.16685015 0.25560540
H -2.45764469 0.29645927 0.25854055
C -0.54749833 1.28174826 0.22897743
H -0.97214124 2.27600137 0.21116093
--
0 1
C 0.00585466 0.07515017 3.77945155
H 0.00284553 0.05759463 2.71537604
C 0.00951511 0.09473103 4.99182772
H 0.01262752 0.11190396 6.05302473
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '51')] = qcdb.Molecule("""
0 1
C -0.60172996 -0.02857012 0.38493492
H -1.66373543 -0.02852657 0.37901431
C 0.61010917 -0.02866364 0.38816379
H 1.67213544 -0.02879308 0.38796752
--
0 1
C -0.00735998 0.10033739 4.14281190
H -0.00396560 0.06660234 3.07951502
C -0.01129640 0.13862741 5.35427728
H -0.01456263 0.17200329 6.41518870
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '52')] = qcdb.Molecule("""
0 1
C 0.96408039 0.87509331 0.37801364
H 1.65982961 1.69993082 0.44604227
C 1.43105709 -0.41313344 0.11899152
H 2.48952453 -0.58720917 -0.01701261
C 0.53412766 -1.47763890 0.04241755
H 0.89696129 -2.47738839 -0.15201199
C -0.83032682 -1.25360409 0.22085611
H -1.52576001 -2.07962435 0.16411655
C -1.29758715 0.03441261 0.48024263
H -2.35439607 0.20801612 0.62856096
C -0.40044509 1.09977921 0.56160137
H -0.76045514 2.09376880 0.78475698
--
0 1
C -0.11985517 0.53438939 4.36008118
O -0.58804476 1.58383601 3.98082079
O 0.28335741 -0.44317387 3.52079591
H 0.11465259 -0.11726029 2.61939066
C 0.09009913 0.13740231 5.79148697
H -0.21986702 0.94673889 6.44147585
H -0.48598160 -0.75922167 6.00843808
H 1.13859655 -0.09872978 5.95650555
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '53')] = qcdb.Molecule("""
0 1
C 0.85556074 0.35853244 1.04975426
H 1.51382550 0.90267956 1.71276582
C 1.34289713 -0.67537866 0.25115740
H 2.39288384 -0.93334472 0.28196305
C 0.47780661 -1.37670110 -0.58781577
H 0.85608399 -2.17890753 -1.20682428
C -0.87482983 -1.04255615 -0.63045178
H -1.54540573 -1.58570014 -1.28241614
C -1.36239729 -0.00701391 0.16584645
H -2.41157102 0.25346723 0.13077885
C -0.49844404 0.69315695 1.00699199
H -0.86611090 1.49033989 1.63803696
--
0 1
C 0.08192937 0.49753072 4.80472861
O 0.32841872 1.54095697 4.21748933
N -0.22211788 -0.65747581 4.15356127
H -0.19691756 -0.66449114 3.14692466
H -0.37789436 -1.51296813 4.64926298
C 0.10477407 0.40263889 6.31314609
H 1.13648787 0.48685118 6.64821988
H -0.31712984 -0.52400410 6.69417176
H -0.44469059 1.24648520 6.71991660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '54')] = qcdb.Molecule("""
0 1
C 0.78014717 -0.60991473 -1.20755689
H 0.89619160 -1.13763959 -2.14414463
C 0.47794275 0.75099363 -1.20789541
H 0.35696423 1.27816780 -2.14405407
C 0.32728928 1.43186787 -0.00000000
H 0.09146503 2.48713922 0.00000000
C 0.47794275 0.75099363 1.20789541
H 0.35696423 1.27816780 2.14405407
C 0.78014717 -0.60991473 1.20755689
H 0.89619160 -1.13763959 2.14414463
C 0.93164831 -1.28998134 0.00000000
H 1.16848573 -2.34521369 -0.00000000
--
0 1
O -2.74383121 -0.26926257 0.00000000
H -2.57902721 -1.21398410 0.00000000
H -1.85653027 0.10232776 0.00000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '55')] = qcdb.Molecule("""
0 1
C 0.75974918 1.03127506 0.37377239
H 1.43501626 1.87566427 0.37470462
C 1.26661779 -0.26736234 0.42127308
H 2.33491597 -0.42918019 0.45943234
C 0.39532054 -1.35599116 0.42490511
H 0.78866193 -2.36249259 0.46303549
C -0.98220564 -1.14665441 0.38127024
H -1.65765632 -1.99114019 0.38512100
C -1.48934612 0.15114979 0.33757234
H -2.55794704 0.31375049 0.30771900
C -0.61877516 1.24033121 0.33388373
H -1.01176161 2.24710690 0.30436922
--
0 1
O 0.04701895 0.30618537 3.68511328
H 0.13311917 0.35605847 2.72791973
C -0.84913165 -0.75142870 3.96816832
H -0.94485234 -0.80816328 5.04910445
H -1.84128123 -0.57973096 3.54437811
H -0.48267133 -1.71446977 3.60525680
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '56')] = qcdb.Molecule("""
0 1
C 0.69231523 1.08829204 0.32484124
H 1.28194880 1.99194678 0.25251578
C 1.31818722 -0.15687008 0.28689607
H 2.39314337 -0.21947636 0.18840681
C 0.55801841 -1.32195045 0.38139986
H 1.04391922 -2.28757380 0.35761542
C -0.82755236 -1.24142187 0.51168501
H -1.41670095 -2.14525152 0.58533927
C -1.45341138 0.00367145 0.54838107
H -2.52823255 0.06570272 0.64984254
C -0.69346094 1.16840108 0.45622907
H -1.17873534 2.13440989 0.48572685
--
0 1
N 0.27506479 -0.22271725 3.85890709
H 0.40968315 -0.17867675 2.85583573
H 0.41655736 0.72242949 4.19137936
C -1.10103469 -0.62910066 4.13634288
H -1.25891125 -0.65764767 5.21289841
H -1.87233687 0.01128013 3.69622388
H -1.25572667 -1.63866846 3.76072118
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '57')] = qcdb.Molecule("""
0 1
C 0.40877989 1.05102502 0.37553605
H 1.01193875 1.94854570 0.36807788
C 1.01916788 -0.19976963 0.28905343
H 2.09557130 -0.27183333 0.21719099
C 0.24172263 -1.35688270 0.29668995
H 0.71521633 -2.32658869 0.22807218
C -1.14617971 -1.26425757 0.39390198
H -1.74918186 -2.16192663 0.39940980
C -1.75727780 -0.01396023 0.48295173
H -2.83351378 0.05824368 0.55903918
C -0.97968602 1.14420653 0.47228370
H -1.45405142 2.11400088 0.53713589
--
0 1
C 0.24562178 1.95675759 4.25663541
H -0.11252332 2.12248844 3.24334264
H 1.27020534 2.31346716 4.33807692
H -0.35847510 2.53039342 4.95498813
C 0.20877544 0.50359448 4.67234424
O 0.49340385 0.15123306 5.81088230
N -0.16361983 -0.36212226 3.69310315
H -0.32474773 -0.00413152 2.76703481
C -0.20041270 -1.78900149 3.91119021
H -0.12232513 -1.95590903 4.98118644
H -1.13565324 -2.20735207 3.54445210
H 0.62871378 -2.29287426 3.41385278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '58')] = qcdb.Molecule("""
0 1
N -0.94121124 0.79004136 0.01171891
C -0.92275524 -0.55237814 0.03537875
H 0.05724051 -1.01558800 0.05135491
C -2.07651907 -1.33301813 0.03929035
H -1.99652895 -2.41058573 0.05887720
C -3.31631294 -0.70333955 0.01759905
H -4.23157489 -1.27908429 0.01979377
C -3.34889528 0.68701881 -0.00708596
H -4.28544414 1.22610455 -0.02465899
C -2.14310382 1.38263356 -0.00889005
H -2.13809974 2.46565258 -0.02778297
--
0 1
N 2.53321129 -0.95002930 0.04251789
C 3.73499010 -1.54320554 0.04459773
H 3.72976625 -2.62616799 0.06648690
C 4.94092634 -0.84824698 0.02059635
H 5.87736466 -1.38778216 0.02369036
C 4.90860873 0.54205748 -0.00715036
H 5.82398367 1.11730853 -0.02633187
C 3.66892840 1.17234361 -0.00962746
H 3.58915567 2.24990219 -0.03071603
C 2.51501483 0.39233399 0.01556620
H 1.53510443 0.85599657 0.01390336
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '59')] = qcdb.Molecule("""
0 1
C -1.00686722 -0.03056821 -0.02477285
H 0.05900333 -0.06093974 -0.04936562
C -2.21874380 0.00317347 0.00259920
H -3.27927730 0.03352491 0.02720048
--
0 1
O 2.26390460 -0.14557006 -0.11547082
H 2.83426102 -0.73533944 0.38155611
H 2.83590044 0.20541797 -0.80084297
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '60')] = qcdb.Molecule("""
0 1
C -0.61056257 0.22750310 -0.17060207
H 0.10738506 0.86143603 -0.63420924
C -1.38627573 -0.52532550 0.37997353
H -2.08070324 -1.17406739 0.85437937
--
0 1
C 2.83444960 -0.64143137 0.46593603
O 2.58027054 0.31467087 -0.23290172
O 1.88654498 -1.41577160 1.03362263
H 1.02554559 -1.04847261 0.76585149
C 4.21008475 -1.12288120 0.81608694
H 4.94847057 -0.48533112 0.34523661
H 4.33629527 -1.11102648 1.89612226
H 4.33236190 -2.15072575 0.48285261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '61')] = qcdb.Molecule("""
0 1
C -2.27534498 -0.13507494 0.83133387
H -2.49071776 -0.72792669 -0.05756635
H -2.22632382 -0.81844641 1.67882341
H -3.11202566 0.54494342 0.98740008
C -0.96169812 0.61927789 0.66939920
H -0.78869920 1.25043181 1.54470266
H -1.02617687 1.29544524 -0.18645838
C 0.22650217 -0.31471031 0.47998579
H 0.30944439 -0.97513911 1.34803794
H 0.03915056 -0.96599875 -0.37878983
C 1.54300168 0.42117452 0.26899951
H 1.71163863 1.10777177 1.10244654
H 1.46609466 1.04374331 -0.62529358
C 2.72757633 -0.52686091 0.13745931
H 2.58874155 -1.20321391 -0.70575734
H 3.66150100 0.01169308 -0.01596863
H 2.83519407 -1.13740994 1.03407512
--
0 1
C -0.48356149 -0.28786315 4.12125154
O -0.90617543 -1.40304340 3.92410496
O -1.29725385 0.77110237 4.35384102
H -2.19801596 0.41672183 4.31330528
C 0.95670557 0.12180293 4.13845692
H 1.58252864 -0.74837801 3.98030176
H 1.13274299 0.85607656 3.35533234
H 1.19401682 0.59110388 5.09025931
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '62')] = qcdb.Molecule("""
0 1
C -2.58777605 -0.32310566 0.46945828
H -2.61038910 -0.87636604 -0.46961946
H -2.65974410 -1.05188654 1.27771411
H -3.47603507 0.30562460 0.50896129
C -1.30955982 0.49739424 0.58506260
H -1.31725060 1.08326190 1.50634108
H -1.26237673 1.21557375 -0.23677617
C -0.05682966 -0.36826029 0.55844017
H -0.08617526 -1.07335882 1.39587537
H -0.05380919 -0.97684333 -0.35147393
C 1.23159606 0.44006559 0.63203246
H 1.21328340 1.05356193 1.53459305
H 1.26629733 1.13137662 -0.21310563
C 2.47257523 -0.44314441 0.61922148
H 2.52071888 -1.03526342 -0.29489695
H 3.38773437 0.14408974 0.68390871
H 2.45929703 -1.13936423 1.45861821
--
0 1
C 0.04216222 0.20124208 4.11650819
O 0.06907449 1.38631556 3.82466701
N 1.17474249 -0.55063556 4.21932814
H 2.04568275 -0.12805505 3.95066588
H 1.13580453 -1.54252223 4.35075106
C -1.24805876 -0.53769541 4.38096202
H -1.10080876 -1.49841677 4.86808639
H -1.75428629 -0.69600434 3.43014867
H -1.88600271 0.08954102 4.99623387
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '63')] = qcdb.Molecule("""
0 1
C 0.60678496 1.33042185 0.31643451
H 1.24649846 2.20226434 0.33035231
C 1.11808466 0.08724886 0.68511652
H 2.15005753 -0.00388678 0.99375824
C 0.29290229 -1.03608737 0.66910727
H 0.68849686 -2.00096149 0.95537797
C -1.04283174 -0.91671112 0.28818964
H -1.68270956 -1.78848825 0.27934903
C -1.55358838 0.32734899 -0.07994317
H -2.58923495 0.42028908 -0.37734619
C -0.72804164 1.45084316 -0.06684834
H -1.12362379 2.41565865 -0.35386143
--
0 1
C 0.41898688 -0.27167884 4.02497697
O 1.61447955 -0.10772809 4.10149274
O -0.16051479 -1.48308380 4.22441532
H 0.57393607 -2.08419229 4.41745344
C -0.60289735 0.77225268 3.70429579
H -0.12460293 1.74319903 3.65747301
H -1.05569745 0.53905649 2.74158774
H -1.38774836 0.76671618 4.45679527
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '64')] = qcdb.Molecule("""
0 1
C 1.62971482 0.50301252 0.27011189
H 1.64157338 1.45923792 -0.24808286
H 2.31531919 -0.18355470 -0.21758635
H 1.96974564 0.64936024 1.29398105
C 0.26182776 -0.13286122 0.31456221
O 0.09925265 -1.30961602 0.61183995
N -0.77350225 0.70251214 0.02207590
H -0.56901138 1.66655677 -0.16581434
C -2.15001214 0.26596865 0.09505328
H -2.14473761 -0.81940745 0.10091210
H -2.64054318 0.61582035 1.00360442
H -2.70774393 0.62075110 -0.76826057
--
0 1
C -0.04575608 0.51799706 3.77621664
H -0.05063764 1.26017087 4.56209922
H -0.69428883 0.68576570 2.92753308
C 0.72275422 -0.56896486 3.84602626
H 1.36805919 -0.74079051 4.69615412
H 0.71764224 -1.30416499 3.05371698
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '65')] = qcdb.Molecule("""
0 1
N -0.08303249 0.00071459 1.05519999
C -0.20285376 -1.14172585 0.36493369
H -0.09848563 -2.05509795 0.93743262
C -0.44678144 -1.19176367 -1.00451226
H -0.53364921 -2.14585511 -1.50417155
C -0.57468209 0.00343953 -1.70430948
H -0.76368391 0.00448010 -2.76872670
C -0.45345675 1.19724254 -1.00091647
H -0.54563080 2.15227264 -1.49779508
C -0.20931111 1.14450759 0.36836730
H -0.11016707 2.05669726 0.94357396
--
0 1
C 0.47183602 -0.00605819 5.54171896
H 0.58724607 -0.00548400 6.59673278
C 0.33976626 -0.00660792 4.33547166
H 0.22161814 -0.00634549 3.27096619
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '66')] = qcdb.Molecule("""
0 1
N -0.54105920 0.02957620 -0.20899508
H 0.05555335 -0.78611810 -0.13029335
H -1.46966940 -0.27470845 0.05314338
C -0.07879927 1.04239036 0.73845886
H -0.72015294 1.91941377 0.67198026
H -0.05075819 0.72382293 1.78551453
H 0.92643072 1.35660379 0.46199919
--
0 1
N 2.34185022 -1.25680010 0.03015300
C 2.68028654 -0.44445604 -0.98155948
H 2.13761932 -0.58899402 -1.90694084
C 3.65161580 0.54767776 -0.88119247
H 3.87646824 1.17201804 -1.73404317
C 4.31245587 0.71721920 0.33107196
H 5.07030981 1.47945653 0.44745609
C 3.97232296 -0.11774333 1.39019492
H 4.45491136 -0.02728109 2.35289557
C 2.98854139 -1.08253234 1.19101154
H 2.70245706 -1.74627994 1.99762219
units angstrom
""")
# <<< Derived Geometry Strings >>>
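# For each dimer, carve out the monomer geometries needed for the interaction
# energy: the unCP monomers keep only their own atoms, while the CP
# (counterpoise) monomers retain the partner fragment's atoms as ghost centers
# so each monomer is evaluated in the full dimer basis.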
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
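# Reference nuclear repulsion energies (in hartree) for every dimer and
# monomer geometry defined above; these presumably serve as sanity checks
# that the geometries are parsed and fragmented correctly.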
DATA['NUCLEAR REPULSION ENERGY']['S66-1-dimer' ] = 36.51369349
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoA-unCP' ] = 9.15671411
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoB-unCP' ] = 9.17259114
DATA['NUCLEAR REPULSION ENERGY']['S66-2-dimer' ] = 79.98338083
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoA-unCP' ] = 9.14996836
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoB-unCP' ] = 40.29463192
DATA['NUCLEAR REPULSION ENERGY']['S66-3-dimer' ] = 79.77996002
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoA-unCP' ] = 9.12565570
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoB-unCP' ] = 42.06267577
DATA['NUCLEAR REPULSION ENERGY']['S66-4-dimer' ] = 246.86074225
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoA-unCP' ] = 9.13184124
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoB-unCP' ] = 180.56084030
DATA['NUCLEAR REPULSION ENERGY']['S66-5-dimer' ] = 129.52156842
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoA-unCP' ] = 40.41731272
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoB-unCP' ] = 40.29806380
DATA['NUCLEAR REPULSION ENERGY']['S66-6-dimer' ] = 131.81617640
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoA-unCP' ] = 40.42467073
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoB-unCP' ] = 42.05202847
DATA['NUCLEAR REPULSION ENERGY']['S66-7-dimer' ] = 313.95975412
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoA-unCP' ] = 40.41876218
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoB-unCP' ] = 180.73873695
DATA['NUCLEAR REPULSION ENERGY']['S66-8-dimer' ] = 78.74537406
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoA-unCP' ] = 40.42326344
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoB-unCP' ] = 9.17236900
DATA['NUCLEAR REPULSION ENERGY']['S66-9-dimer' ] = 129.31867271
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoA-unCP' ] = 42.10593235
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoB-unCP' ] = 40.34710761
DATA['NUCLEAR REPULSION ENERGY']['S66-10-dimer' ] = 131.71717765
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoA-unCP' ] = 42.09217552
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoB-unCP' ] = 42.05982938
DATA['NUCLEAR REPULSION ENERGY']['S66-11-dimer' ] = 320.50976921
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoA-unCP' ] = 42.09328618
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoB-unCP' ] = 180.72211450
DATA['NUCLEAR REPULSION ENERGY']['S66-12-dimer' ] = 81.87844165
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoA-unCP' ] = 42.04336531
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoB-unCP' ] = 9.12312499
DATA['NUCLEAR REPULSION ENERGY']['S66-13-dimer' ] = 314.84789007
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoA-unCP' ] = 180.80545988
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoB-unCP' ] = 40.30378877
DATA['NUCLEAR REPULSION ENERGY']['S66-14-dimer' ] = 315.64348724
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoA-unCP' ] = 180.81499576
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoB-unCP' ] = 42.03791353
DATA['NUCLEAR REPULSION ENERGY']['S66-15-dimer' ] = 540.42243680
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoA-unCP' ] = 180.53794513
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoB-unCP' ] = 180.54327910
DATA['NUCLEAR REPULSION ENERGY']['S66-16-dimer' ] = 243.51194018
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoA-unCP' ] = 180.57089645
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoB-unCP' ] = 9.17374713
DATA['NUCLEAR REPULSION ENERGY']['S66-17-dimer' ] = 1040.55250335
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoA-unCP' ] = 357.25263911
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoB-unCP' ] = 357.22824169
DATA['NUCLEAR REPULSION ENERGY']['S66-18-dimer' ] = 269.39653929
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoA-unCP' ] = 9.12915636
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoB-unCP' ] = 206.28546361
DATA['NUCLEAR REPULSION ENERGY']['S66-19-dimer' ] = 337.49486033
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoA-unCP' ] = 40.42190801
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoB-unCP' ] = 206.28426737
DATA['NUCLEAR REPULSION ENERGY']['S66-20-dimer' ] = 381.47467603
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoA-unCP' ] = 121.35354216
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoB-unCP' ] = 121.35037507
DATA['NUCLEAR REPULSION ENERGY']['S66-21-dimer' ] = 373.66110820
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoA-unCP' ] = 121.85534909
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoB-unCP' ] = 121.85562743
DATA['NUCLEAR REPULSION ENERGY']['S66-22-dimer' ] = 685.96293615
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoA-unCP' ] = 121.30606379
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoB-unCP' ] = 357.30242624
DATA['NUCLEAR REPULSION ENERGY']['S66-23-dimer' ] = 682.46450694
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoA-unCP' ] = 121.91206440
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoB-unCP' ] = 357.16987646
DATA['NUCLEAR REPULSION ENERGY']['S66-24-dimer' ] = 623.71187998
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoA-unCP' ] = 203.71200257
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoB-unCP' ] = 203.71172379
DATA['NUCLEAR REPULSION ENERGY']['S66-25-dimer' ] = 637.14156863
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoA-unCP' ] = 206.22564193
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoB-unCP' ] = 206.22748415
DATA['NUCLEAR REPULSION ENERGY']['S66-26-dimer' ] = 1163.54572871
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoA-unCP' ] = 357.16027337
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoB-unCP' ] = 357.16027370
DATA['NUCLEAR REPULSION ENERGY']['S66-27-dimer' ] = 630.67443466
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoA-unCP' ] = 203.68422363
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoB-unCP' ] = 206.25955744
DATA['NUCLEAR REPULSION ENERGY']['S66-28-dimer' ] = 878.32907732
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoA-unCP' ] = 203.65134501
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoB-unCP' ] = 357.16948119
DATA['NUCLEAR REPULSION ENERGY']['S66-29-dimer' ] = 885.28192562
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoA-unCP' ] = 206.16040036
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoB-unCP' ] = 357.23565563
DATA['NUCLEAR REPULSION ENERGY']['S66-30-dimer' ] = 327.62509332
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoA-unCP' ] = 203.74228045
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoB-unCP' ] = 33.43000301
DATA['NUCLEAR REPULSION ENERGY']['S66-31-dimer' ] = 518.26358403
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoA-unCP' ] = 357.18726739
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoB-unCP' ] = 33.40409180
DATA['NUCLEAR REPULSION ENERGY']['S66-32-dimer' ] = 495.33117294
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoA-unCP' ] = 357.24995067
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoB-unCP' ] = 24.63459975
DATA['NUCLEAR REPULSION ENERGY']['S66-33-dimer' ] = 332.11307535
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoA-unCP' ] = 206.29228895
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoB-unCP' ] = 33.42391806
DATA['NUCLEAR REPULSION ENERGY']['S66-34-dimer' ] = 577.94330068
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoA-unCP' ] = 185.63664994
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoB-unCP' ] = 185.63558546
DATA['NUCLEAR REPULSION ENERGY']['S66-35-dimer' ] = 574.13141612
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoA-unCP' ] = 185.63471242
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoB-unCP' ] = 199.36895747
DATA['NUCLEAR REPULSION ENERGY']['S66-36-dimer' ] = 573.01241887
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoA-unCP' ] = 199.35493735
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoB-unCP' ] = 199.35496470
DATA['NUCLEAR REPULSION ENERGY']['S66-37-dimer' ] = 569.42803611
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoA-unCP' ] = 188.28929834
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoB-unCP' ] = 199.34481507
DATA['NUCLEAR REPULSION ENERGY']['S66-38-dimer' ] = 562.36494675
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoA-unCP' ] = 188.38358820
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoB-unCP' ] = 188.37865241
DATA['NUCLEAR REPULSION ENERGY']['S66-39-dimer' ] = 594.82529945
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoA-unCP' ] = 203.67735882
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoB-unCP' ] = 188.40454306
DATA['NUCLEAR REPULSION ENERGY']['S66-40-dimer' ] = 598.08168004
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoA-unCP' ] = 203.68538784
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoB-unCP' ] = 199.37329650
DATA['NUCLEAR REPULSION ENERGY']['S66-41-dimer' ] = 843.32242800
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoA-unCP' ] = 357.06617642
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoB-unCP' ] = 185.61673585
DATA['NUCLEAR REPULSION ENERGY']['S66-42-dimer' ] = 830.51659591
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoA-unCP' ] = 357.04169352
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoB-unCP' ] = 188.33728572
DATA['NUCLEAR REPULSION ENERGY']['S66-43-dimer' ] = 830.36688604
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoA-unCP' ] = 357.12713115
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoB-unCP' ] = 199.36153551
DATA['NUCLEAR REPULSION ENERGY']['S66-44-dimer' ] = 303.64951312
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoA-unCP' ] = 33.42556566
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoB-unCP' ] = 185.65594848
DATA['NUCLEAR REPULSION ENERGY']['S66-45-dimer' ] = 285.69697355
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoA-unCP' ] = 24.64923587
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoB-unCP' ] = 185.73197134
DATA['NUCLEAR REPULSION ENERGY']['S66-46-dimer' ] = 576.36980953
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoA-unCP' ] = 180.49044991
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoB-unCP' ] = 185.67687994
DATA['NUCLEAR REPULSION ENERGY']['S66-47-dimer' ] = 592.90348525
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoA-unCP' ] = 203.66921988
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoB-unCP' ] = 203.67694204
DATA['NUCLEAR REPULSION ENERGY']['S66-48-dimer' ] = 601.34387795
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoA-unCP' ] = 206.19608668
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoB-unCP' ] = 206.19869697
DATA['NUCLEAR REPULSION ENERGY']['S66-49-dimer' ] = 596.54644729
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoA-unCP' ] = 203.65045916
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoB-unCP' ] = 206.22459403
DATA['NUCLEAR REPULSION ENERGY']['S66-50-dimer' ] = 300.96547874
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoA-unCP' ] = 203.65156163
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoB-unCP' ] = 24.63554547
DATA['NUCLEAR REPULSION ENERGY']['S66-51-dimer' ] = 73.51391626
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoA-unCP' ] = 24.65072244
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoB-unCP' ] = 24.64312912
DATA['NUCLEAR REPULSION ENERGY']['S66-52-dimer' ] = 488.72204285
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoA-unCP' ] = 203.60587521
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoB-unCP' ] = 121.22680816
DATA['NUCLEAR REPULSION ENERGY']['S66-53-dimer' ] = 475.54833273
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoA-unCP' ] = 203.61290966
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoB-unCP' ] = 121.83743933
DATA['NUCLEAR REPULSION ENERGY']['S66-54-dimer' ] = 274.02041197
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoA-unCP' ] = 203.63390042
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoB-unCP' ] = 9.16766818
DATA['NUCLEAR REPULSION ENERGY']['S66-55-dimer' ] = 349.34385129
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoA-unCP' ] = 203.62143957
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoB-unCP' ] = 40.41522246
DATA['NUCLEAR REPULSION ENERGY']['S66-56-dimer' ] = 347.25412940
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoA-unCP' ] = 203.65859480
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoB-unCP' ] = 42.10725315
DATA['NUCLEAR REPULSION ENERGY']['S66-57-dimer' ] = 584.88796485
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoA-unCP' ] = 203.60060155
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoB-unCP' ] = 180.55180987
DATA['NUCLEAR REPULSION ENERGY']['S66-58-dimer' ] = 577.23538658
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoA-unCP' ] = 206.16864626
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoB-unCP' ] = 206.16860003
DATA['NUCLEAR REPULSION ENERGY']['S66-59-dimer' ] = 53.29797952
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoA-unCP' ] = 24.62604423
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoB-unCP' ] = 9.17684034
DATA['NUCLEAR REPULSION ENERGY']['S66-60-dimer' ] = 206.60195669
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoA-unCP' ] = 24.62574637
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoB-unCP' ] = 121.22795347
DATA['NUCLEAR REPULSION ENERGY']['S66-61-dimer' ] = 475.00612950
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoA-unCP' ] = 185.62492607
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoB-unCP' ] = 121.23972648
DATA['NUCLEAR REPULSION ENERGY']['S66-62-dimer' ] = 478.48168724
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoA-unCP' ] = 185.65184859
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoB-unCP' ] = 121.86597939
DATA['NUCLEAR REPULSION ENERGY']['S66-63-dimer' ] = 496.78090588
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoA-unCP' ] = 203.66095658
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoB-unCP' ] = 121.23566219
DATA['NUCLEAR REPULSION ENERGY']['S66-64-dimer' ] = 300.38789564
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoA-unCP' ] = 180.56185111
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoB-unCP' ] = 33.41895147
DATA['NUCLEAR REPULSION ENERGY']['S66-65-dimer' ] = 292.14525417
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoA-unCP' ] = 206.26607138
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoB-unCP' ] = 24.59915901
DATA['NUCLEAR REPULSION ENERGY']['S66-66-dimer' ] = 349.09867633
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoA-unCP' ] = 42.09376472
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoB-unCP' ] = 206.23491680
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoA-CP' ] = 9.15671411
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoB-CP' ] = 9.17259114
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoA-CP' ] = 9.14996836
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoB-CP' ] = 40.29463192
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoA-CP' ] = 9.12565570
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoB-CP' ] = 42.06267577
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoA-CP' ] = 9.13184124
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoB-CP' ] = 180.56084030
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoA-CP' ] = 40.41731272
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoB-CP' ] = 40.29806380
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoA-CP' ] = 40.42467073
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoB-CP' ] = 42.05202847
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoA-CP' ] = 40.41876218
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoB-CP' ] = 180.73873695
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoA-CP' ] = 40.42326344
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoB-CP' ] = 9.17236900
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoA-CP' ] = 42.10593235
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoB-CP' ] = 40.34710761
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoA-CP' ] = 42.09217552
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoB-CP' ] = 42.05982938
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoA-CP' ] = 42.09328618
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoB-CP' ] = 180.72211450
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoA-CP' ] = 42.04336531
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoB-CP' ] = 9.12312499
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoA-CP' ] = 180.80545988
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoB-CP' ] = 40.30378877
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoA-CP' ] = 180.81499576
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoB-CP' ] = 42.03791353
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoA-CP' ] = 180.53794513
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoB-CP' ] = 180.54327910
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoA-CP' ] = 180.57089645
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoB-CP' ] = 9.17374713
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoA-CP' ] = 357.25263911
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoB-CP' ] = 357.22824169
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoA-CP' ] = 9.12915636
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoB-CP' ] = 206.28546361
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoA-CP' ] = 40.42190801
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoB-CP' ] = 206.28426737
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoA-CP' ] = 121.35354216
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoB-CP' ] = 121.35037507
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoA-CP' ] = 121.85534909
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoB-CP' ] = 121.85562743
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoA-CP' ] = 121.30606379
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoB-CP' ] = 357.30242624
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoA-CP' ] = 121.91206440
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoB-CP' ] = 357.16987646
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoA-CP' ] = 203.71200257
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoB-CP' ] = 203.71172379
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoA-CP' ] = 206.22564193
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoB-CP' ] = 206.22748415
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoA-CP' ] = 357.16027337
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoB-CP' ] = 357.16027370
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoA-CP' ] = 203.68422363
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoB-CP' ] = 206.25955744
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoA-CP' ] = 203.65134501
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoB-CP' ] = 357.16948119
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoA-CP' ] = 206.16040036
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoB-CP' ] = 357.23565563
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoA-CP' ] = 203.74228045
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoB-CP' ] = 33.43000301
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoA-CP' ] = 357.18726739
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoB-CP' ] = 33.40409180
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoA-CP' ] = 357.24995067
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoB-CP' ] = 24.63459975
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoA-CP' ] = 206.29228895
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoB-CP' ] = 33.42391806
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoA-CP' ] = 185.63664994
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoB-CP' ] = 185.63558546
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoA-CP' ] = 185.63471242
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoB-CP' ] = 199.36895747
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoA-CP' ] = 199.35493735
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoB-CP' ] = 199.35496470
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoA-CP' ] = 188.28929834
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoB-CP' ] = 199.34481507
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoA-CP' ] = 188.38358820
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoB-CP' ] = 188.37865241
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoA-CP' ] = 203.67735882
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoB-CP' ] = 188.40454306
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoA-CP' ] = 203.68538784
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoB-CP' ] = 199.37329650
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoA-CP' ] = 357.06617642
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoB-CP' ] = 185.61673585
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoA-CP' ] = 357.04169352
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoB-CP' ] = 188.33728572
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoA-CP' ] = 357.12713115
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoB-CP' ] = 199.36153551
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoA-CP' ] = 33.42556566
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoB-CP' ] = 185.65594848
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoA-CP' ] = 24.64923587
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoB-CP' ] = 185.73197134
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoA-CP' ] = 180.49044991
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoB-CP' ] = 185.67687994
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoA-CP' ] = 203.66921988
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoB-CP' ] = 203.67694204
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoA-CP' ] = 206.19608668
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoB-CP' ] = 206.19869697
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoA-CP' ] = 203.65045916
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoB-CP' ] = 206.22459403
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoA-CP' ] = 203.65156163
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoB-CP' ] = 24.63554547
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoA-CP' ] = 24.65072244
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoB-CP' ] = 24.64312912
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoA-CP' ] = 203.60587521
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoB-CP' ] = 121.22680816
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoA-CP' ] = 203.61290966
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoB-CP' ] = 121.83743933
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoA-CP' ] = 203.63390042
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoB-CP' ] = 9.16766818
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoA-CP' ] = 203.62143957
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoB-CP' ] = 40.41522246
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoA-CP' ] = 203.65859480
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoB-CP' ] = 42.10725315
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoA-CP' ] = 203.60060155
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoB-CP' ] = 180.55180987
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoA-CP' ] = 206.16864626
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoB-CP' ] = 206.16860003
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoA-CP' ] = 24.62604423
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoB-CP' ] = 9.17684034
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoA-CP' ] = 24.62574637
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoB-CP' ] = 121.22795347
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoA-CP' ] = 185.62492607
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoB-CP' ] = 121.23972648
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoA-CP' ] = 185.65184859
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoB-CP' ] = 121.86597939
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoA-CP' ] = 203.66095658
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoB-CP' ] = 121.23566219
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoA-CP' ] = 180.56185111
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoB-CP' ] = 33.41895147
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoA-CP' ] = 206.26607138
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoB-CP' ] = 24.59915901
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoA-CP' ] = 42.09376472
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoB-CP' ] = 206.23491680
|
spring01/libPSI
|
lib/databases/S66.py
|
Python
|
gpl-2.0
| 148,284
|
[
"Psi4"
] |
c923dcde0c2631c468e69446294f729b634d82dc03f6fbfb9884dcf385cba730
|
# -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from rest_framework.reverse import reverse as rest_framework_reverse
from pypln.web.core.models import Document
from pypln.web.core.tests.utils import TestWithMongo
__all__ = ["DocumentListTest", "DocumentDetailTest"]
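# These tests exercise the read-only document property endpoints: listing the
# URLs of a document's extracted properties and fetching a single property's
# value (or all of them via the special 'all_data' property).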
class DocumentListTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.document = Document.objects.filter(owner__username="user")[0]
self.user = self.document.owner
def test_requires_login(self):
response = self.client.get(reverse('property-list',
kwargs={'pk': self.document.id}))
self.assertEqual(response.status_code, 403)
def test_shows_document_correctly(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-list',
kwargs={'pk': self.document.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.renderer_context['view'].get_object(), self.document)
fake_request = RequestFactory().get(reverse('property-list',
kwargs={'pk': self.document.id}))
expected_urls = [rest_framework_reverse('property-detail', kwargs={
'pk': self.document.id, 'property': prop}, request=fake_request)
for prop in self.document.properties.keys()]
self.assertEqual(response.data['properties'], expected_urls)
def test_returns_404_for_inexistent_document(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-list',
kwargs={'pk': 9999}))
self.assertEqual(response.status_code, 404)
def test_returns_404_if_user_is_not_the_owner_of_the_document(self):
self.client.login(username="user", password="user")
other_doc = Document.objects.filter(owner__username="admin")[0]
response = self.client.get(reverse('property-list',
kwargs={'pk': other_doc.id}))
self.assertEqual(response.status_code, 404)
def test_only_accepts_get(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.post(reverse('property-list',
kwargs={'pk': self.document.id}))
self.assertEqual(response.status_code, 405)
response = self.client.put(reverse('property-list',
kwargs={'pk': self.document.id}))
self.assertEqual(response.status_code, 405)
response = self.client.delete(reverse('property-list',
kwargs={'pk': self.document.id}))
self.assertEqual(response.status_code, 405)
class DocumentDetailTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.document = Document.objects.filter(owner__username="user")[0]
self.user = self.document.owner
def test_requires_login(self):
response = self.client.get(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'text'}))
self.assertEqual(response.status_code, 403)
def test_shows_document_correctly(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'text'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.renderer_context['view'].get_object(),
self.document)
self.assertEqual(response.data['value'],
self.document.properties['text'])
def test_returns_404_for_inexistent_document(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-detail',
kwargs={'pk': 9999, 'property': 'text'}))
self.assertEqual(response.status_code, 404)
def test_returns_404_for_inexistent_property(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'inexistent'}))
self.assertEqual(response.status_code, 404)
def test_returns_404_if_user_is_not_the_owner_of_the_document(self):
self.client.login(username="user", password="user")
other_doc = Document.objects.filter(owner__username="admin")[0]
response = self.client.get(reverse('property-detail',
kwargs={'pk': other_doc.id, 'property': 'text'}))
self.assertEqual(response.status_code, 404)
def test_only_accepts_get(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.post(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'text'}))
self.assertEqual(response.status_code, 405)
response = self.client.put(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'text'}))
self.assertEqual(response.status_code, 405)
response = self.client.delete(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'text'}))
self.assertEqual(response.status_code, 405)
def test_shows_all_properties_for_all_data(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('property-detail',
kwargs={'pk': self.document.id, 'property': 'all_data'}))
expected_result = {k: self.document.properties[k] for k in
self.document.properties.keys()}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.renderer_context['view'].get_object(),
self.document)
self.assertEqual(response.data['value'],
expected_result)
|
flavioamieiro/pypln.web
|
pypln/web/core/tests/views/test_properties.py
|
Python
|
gpl-3.0
| 6,751
|
[
"NAMD"
] |
d82f7aacb39263e80432a955a8be1b93274261346f3b750b18ee63bb8ce7d415
|
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
import math
def main():
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(2, 2, 5)
myscreen.camera.SetFocalPoint(0.5,0, 1)
# axis arrows
camvtk.drawArrows(myscreen,center=(-2,-2,0))
camvtk.drawOCLtext(myscreen)
s = ocl.BallCutterVolume()
#s = ocl.CylCutterVolume()
#s = ocl.BullCutterVolume()
#s.center = ocl.Point(-2.50,-0.6,0)
s.r1=0.3
s.r2=0.1
s.radius = 0.4
s.length = 2
startpoint = ocl.Point(0.46,1.0,0.4)
s.setPos( startpoint )
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
cp= ocl.Point(0,0,0) # center of octree
max_depth = 6
root_scale = 1
t = ocl.Octree(root_scale, max_depth, cp)
t.init(2)
n = 0 # the frame number
print "root_scale = ", t.root_scale()
print " max_depth = ", t.max_depth()
print " leaf_scale=", t.leaf_scale()
# X
#stockbox = ocl.PlaneVolume( 1, 0, -0.9)
#t.diff_negative(stockbox)
#stockbox = ocl.PlaneVolume( 0, 0, 0.9 )
#t.diff_negative(stockbox)
# Y
#stockbox = ocl.PlaneVolume( 1, 1, -0.9)
#t.diff_negative(stockbox)
#stockbox = ocl.PlaneVolume( 0, 1, 0.9 )
#t.diff_negative(stockbox)
# Z
#stockbox = ocl.PlaneVolume( 1, 2, 0.1 )
#t.diff_negative(stockbox)
#stockbox = ocl.PlaneVolume( 0, 2, 0.8)
#t.diff_negative(stockbox)
#t.diff_negative(s)
mc = ocl.MarchingCubes()
print "mc()...",
tris = mc.mc_tree(t) # t.mc_triangles()
print " mc() got ", len(tris), " triangles"
#tris2 = t.side_triangles()
#print "appending"
#for tr in tris2:
# tris.append(tr)
#print " side_triangles() got ", len(tris2), " triangles"
mc_surf = camvtk.STLSurf( triangleList=tris )
mc_surf.SetColor(camvtk.cyan)
#s_surf = camvtk.STLSurf( triangleList=tris2 )
#s_surf.SetColor(camvtk.yellow)
#mc_surf.SetWireframe()
#mc_surf.SetOpacity(0.3)
print " STLSurf()...",
myscreen.addActor( mc_surf )
#myscreen.addActor( s_surf )
print "done."
myscreen.render()
myscreen.render()
#myscreen.iren.Start()
#exit()
myscreen.removeActor( mc_surf )
#myscreen.removeActor( s_surf )
#renderinterleave=900
#step_time = 0
Nmax=10
#dy = float(-2)/float(Nmax)
dy = - 2* t.leaf_scale()
cl = startpoint
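    # Simulation loop: step the cutter along -y by two leaf voxels per frame,
    # subtract its volume from the stock octree, re-run marching cubes on the
    # updated tree, and redraw the resulting wireframe surface.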
while (n<Nmax):
cl = cl + ocl.Point(0.0,dy,0)
#cl = ocl.Point( clpoints[n].x, clpoints[n].y, clpoints[n].z )
s.setPos( cl ) # move the cutter
t_before = time.time()
t.diff_negative(s) # subtract cutter from stock
t_after = time.time()
build_time = t_after-t_before
#print n," diff() took ",build_time," s"
#step_time=step_time+build_time
if n<Nmax:
myscreen.removeActor( mc_surf )
#myscreen.removeActor( s_surf )
#for c in cactors:
# myscreen.removeActor( c )
#call_ms = 1e3*step_time/renderinterleave
#print renderinterleave," diff() calls in", step_time, " = ", call_ms," ms/call"
#infotext= "Octree max_depth=%i \nCL-point %i of %i \ndiff() CPU-time: %f ms/CL-point" % (max_depth,n,
# len(clpoints), call_ms )
#octtext.SetText(infotext)
#postext= "X: %f\nY: %f\nZ: %f" % (cl.x,cl.y,cl.z )
#cltext.SetText(postext)
#cactors = camvtk.drawBallCutter(myscreen, cutter, cl)
#t_before = time.time()
#print "mc()...",
tris = mc.mc_tree(t) #t.mc_triangles()
#tris2 = t.side_triangles()
#print "appending"
#for tr in tris2:
# tris.append(tr)
#mc_time = time.time()-t_before
#print "done in ", mc_time," s"
#print " mc() got ", len(tris), " triangles"
#print " STLSurf()...",
mc_surf = camvtk.STLSurf( triangleList=tris )
mc_surf.SetWireframe()
mc_surf.SetColor(camvtk.cyan)
myscreen.addActor( mc_surf )
#s_surf = camvtk.STLSurf( triangleList=tris2 )
#s_surf.SetWireframe()
#s_surf.SetColor(camvtk.yellow)
#myscreen.addActor( s_surf )
#print "done."
#print " render()...",
myscreen.render()
#myscreen.camera.Azimuth( 0.1 )
#lwr.SetFileName("frames/wireframe3_d8_frame"+ ('%06d' % n)+".png")
#w2if.Modified()
#lwr.Write()
#print "done."
#time.sleep(0.4)
print n, " mc_tris=",len(tris)
#," side_tris=",len(tris2)
n=n+1
#myscreen.camera.SetPosition(3*math.cos( 7*float(n)/(float(Nmax)) ), 3*math.sin( 7*float(n)/(float(Nmax)) ), 5)
#myscreen.camera.Azimuth( math.sin( 5*float(n)/(float(Nmax)) ) )
print "all done."
myscreen.iren.Start()
exit()
if __name__ == "__main__":
main()
|
tectronics/opencamlib
|
scripts/cutsim/cutsim_10_side_tris.py
|
Python
|
gpl-3.0
| 5,190
|
[
"VTK"
] |
afccd71350edd6c7a655c867317e1218544b20cc8321dce17c3f3ed6ad8a91d7
|
#!/usr/bin/env python
# mpirun -np 2 python test_ccsd.py
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import cc
from mpi4pyscf import cc as mpicc
from mpi4pyscf.tools import mpi
mol = gto.Mole()
mol.atom = [
[2 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '6-31g'
mol.build()
mf = scf.RHF(mol)
nao = mol.nao_nr()
numpy.random.seed(1)
mf.mo_coeff = numpy.random.random((nao,nao)) - 0.5
mf.mo_occ = numpy.zeros(nao)
nocc = mol.nelectron // 2
nvir = nao - nocc
mf.mo_occ[:mol.nelectron//2] = 2
mycc = cc.CCSD(mf)
mycc.direct = True
eris = mycc.ao2mo(mf.mo_coeff)
mycc1 = mpicc.ccsd.CCSD(mf)
mycc1.ao2mo(mf.mo_coeff)
eris1 = mycc1._eris
nv = eris1.oovv.shape[2]
print(abs(numpy.asarray(eris1.oooo) - numpy.asarray(eris.oooo)).max())
print(abs(numpy.asarray(eris1.oovv) - numpy.asarray(eris.oovv[:,:,:nv])).max())
print(abs(numpy.asarray(eris1.ovvo) - numpy.asarray(eris.ovvo[:,:nv,:])).max())
print(abs(numpy.asarray(eris1.ovov) - numpy.asarray(eris.ovov[:,:nv,:])).max())
emp2, r1, r2 = mycc.init_amps(eris)
print(lib.finger(r1) - 0.20852878109950079)
print(lib.finger(r2) - 0.21333574169417541)
print(emp2 - -0.12037888088751542)
emp2, v1, v2 = mycc1.init_amps()
print(abs(v1 - r1).max())
print(abs(v2 - r2[:,:,:nv]).max())
print(emp2 - -0.12037888088751542)
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
v1, v2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(v1) - 9.6029949445427079)
print(lib.finger(v2) - 4.5308876217231813)
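# Cross-check: run the same amplitude update through the MPI-distributed CCSD
# object and compare fingerprints against the serial result above.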
def on_node(reg_procs, t1):
from mpi4pyscf.tools import mpi
mycc1 = mpi._registry[reg_procs[mpi.rank]]
t2 = mycc1.t2
eris = mycc1._eris
t1, t2 = mycc1.update_amps(t1, t2, eris)
t2 = mpi.gather(t2.transpose(2,3,0,1)).transpose(2,3,0,1)
return t1, t2
mpicc.ccsd.distribute_amplitudes_(mycc1, t1, t2)
x1, x2 = mpi.pool.apply(on_node, (mycc1._reg_procs, t1), (mycc1._reg_procs, t1))
print(lib.finger(x1) - 9.6029949445427079)
print(lib.finger(x2) - 4.5308876217231813)
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '6-31g'
mol.build()
mf = scf.RHF(mol).run()
mycc = cc.CCSD(mf)
mycc.kernel()
mycc1 = mpicc.ccsd.CCSD(mf)
mycc1.kernel()
print(mycc.e_tot - mycc1.e_tot)
|
sunqm/mpi4pyscf
|
mpi4pyscf/cc/test/test_ccsd.py
|
Python
|
gpl-3.0
| 2,398
|
[
"PySCF"
] |
bb0277249d315eea32e0fcd132dbfbcf323cea643a7b02c6fb541cebc703bb80
|
#!/usr/bin/env python
import calendar
import netCDF4
import numpy as np
import os
import pandas as pd
import sys
import traceback
from cStringIO import StringIO
from datetime import datetime, timedelta
from .. import translator
# Return int with num days per year
def days_per_year(year):
if calendar.isleap(year):
return 366
return 365
# Return a list of date indexes to be included in a yearly netcdf (limit to 730)
def indexes(year, ref_year):
ref_day = datetime(ref_year, 1, 1)
first_index = (datetime(year, 1, 1) - ref_day).days
last_index = first_index + 730
return range(first_index, last_index)
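# Example (hypothetical values): with ref_year=1980, indexes(1981, 1980)
# starts at day index 366 (1980 is a leap year) and spans 730 consecutive
# entries, so one yearly file can hold up to two years of daily records.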
# Get index of matching date from list
def get_date_index(dates, dt):
if len(dates) == 0:
return None
first = dates[0]
index = (dt - first).days
if index >= 0 and index < len(dates) and dates[index] == dt:
return index
else:
return None
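# Note: the lookup above is constant-time rather than a list search; it
# assumes `dates` is a run of consecutive days, computes the candidate offset
# from the first entry, then verifies the match before returning it.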
# Parse daily DSSAT output and append to a dictionary of numpy arrays
def read_daily(filename, variables, data, scens, scen_years, runs, num_years, lat, lon, fill_value, ref_year, dates):
daily_file = open(filename, 'r')
is_data = False
run = -1
indexes = {}
for line in daily_file:
line = line.strip()
if not line: continue
if line.startswith('*'):
is_data = False
elif line.startswith('@'):
headers = []
run += 1
scen_index = int(run * np.double(scen_years) / (num_years))
line = line.lstrip('@')
is_data = True
start_year = ref_year + (run % num_years)
if is_data:
line = line.split()
if len(headers) == 0:
for i,l in enumerate(line):
line[i] = l.replace('%', 'P')
headers.extend(line)
for header in headers:
indexes[header] = headers.index(header)
else:
year = int(line[indexes['YEAR']])
doy = int(line[indexes['DOY']])
dt = datetime(year, 1, 1) + timedelta(days=doy - 1)
dt_position = get_date_index(dates, dt)
for v in variables:
if dt_position is not None and v in indexes:
val = line[indexes[v]]
data[start_year][v][dt_position, scen_index, 0, 0] = val
return data
# Return a list of variables to be used per dssat filename
def variables_by_file(df, variables):
result = {}
for index,row in df.iterrows():
if row.variable in variables:
try:
result[row.filename].append(row.variable)
except KeyError:
result[row.filename] = [row.variable]
for v in variables:
if v not in [x for z in result.values() for x in z]:
print "Warning: Cannot find variable %s, skipping" % v
return result
class Out2PsimsDaily(translator.Translator):
def run(self, latidx, lonidx):
try:
num_scenarios = self.config.get('scens', '1')
num_years = self.config.get('num_years', '1')
variables = self.config.get('variables', '')
units = self.config.get('var_units', '')
delta = self.config.get('delta', '30')
ref_year = self.config.get('ref_year', '1958')
daily_csv = pd.read_csv('%s%s%s' % (os.path.dirname(__file__), os.sep, 'daily_variables.csv'))
outputfile = self.config.get_dict(self.translator_type, 'outputfile', default = '../../outputs/daily_%04d_%04d.psims.nc' % (latidx, lonidx))
scen_years = self.config.get('scen_years', num_years)
start_date = datetime(ref_year, 1, 1)
end_date = datetime(ref_year + num_years - 1, 12, 31)
dates = [start_date + timedelta(days=x) for x in range(0, (end_date-start_date).days+1)]
runs = num_scenarios
num_scenarios = int(num_scenarios * np.double(scen_years) / num_years)
latidx = int(latidx)
lonidx = int(lonidx)
delta = delta.split(',')
latdelta = np.double(delta[0]) / 60. # convert from arcminutes to degrees
londelta = latdelta if len(delta) == 1 else np.double(delta[1]) / 60.
scens = np.arange(num_scenarios)
variables = self.config.get('daily_variables').split(',')
variable_files = variables_by_file(daily_csv, variables)
lat = 90. - latdelta * (latidx - 0.5)
lon = -180. + londelta * (lonidx - 0.5)
fill_value = netCDF4.default_fillvals['f4']
data = {}
# Populate data array
for filename,varlist in variable_files.iteritems():
for v in varlist:
for start_year in range(ref_year, ref_year+num_years):
try:
data[start_year][v] = np.empty(shape=(len(dates), len(scens), 1, 1), dtype=float)
data[start_year][v].fill(fill_value)
except KeyError:
data[start_year] = {}
data[start_year][v] = np.empty(shape=(len(dates), len(scens), 1, 1), dtype=float)
data[start_year][v].fill(fill_value)
data = read_daily(filename, varlist, data, scens, scen_years, runs, num_years, 0, 0, fill_value, ref_year, dates)
# Save to NetCDF
for year in data:
current_outputfile = outputfile.replace('psims.nc', '%04d.psims.nc' % year)
netcdf_output = netCDF4.Dataset(current_outputfile, 'w', format='NETCDF4', fill_value=fill_value, zlib=None)
scen_dim = netcdf_output.createDimension('scen', len(scens))
scen_var = netcdf_output.createVariable('scen', 'i4', ('scen'))
scen_var.units = "count"
scen_var.long_name = "scenario"
scen_var[:] = scens[:]
time_dim = netcdf_output.createDimension('time', None)
time_var = netcdf_output.createVariable('time', 'i4', ('time'))
time_var.units = "days since %04d-%02d-%02d 00:00:00" % (start_date.year, start_date.month, start_date.day)
time_var.calendar = 'gregorian'
lat_dim = netcdf_output.createDimension('lat', 1)
lat_var = netcdf_output.createVariable('lat', 'f8', ('lat'))
lat_var.units = "degrees_north"
                lat_var.long_name = "latitude"
lat_var[:] = lat
lon_dim = netcdf_output.createDimension('lon', 1)
lon_var = netcdf_output.createVariable('lon', 'f8', ('lon'))
lon_var.units = "degrees_east"
lon_var.long_name = "longitude"
lon_var[:] = lon
first_idx = None
last_idx = None
times = []
for v in data[year]:
times = indexes(year, ref_year)
time_var[:] = times
first_idx = times[0]
last_idx = times[-1]
for key,val in data[year].iteritems():
var = netcdf_output.createVariable(key, 'f4', ('time', 'scen', 'lat', 'lon'), fill_value=fill_value)
                    var[:] = val[first_idx:last_idx+1]
units = daily_csv['units'][daily_csv["variable"] == key].iloc[0]
if units:
var.units = units
long_name = daily_csv['long_name'][daily_csv["variable"] == key].iloc[0]
if long_name:
var.long_name = long_name
times = []
netcdf_output.close()
return True
except:
print "[%s] (%s/%s): %s" % (os.path.basename(__file__), latidx, lonidx, traceback.format_exc())
return False
|
RDCEP/psims
|
pysims/translators/dssat46/out2psimsdaily.py
|
Python
|
agpl-3.0
| 8,414
|
[
"NetCDF"
] |
0380051462ce37394167452e8668ddffc7b16ec2d8e1e36d66cb3fe2971d0945
|
#!/usr/bin/env python
"""
Given an input table with ra/dec, returns a culled list of ra/dec that are not near "DETECTED" pixels in an image,
i.e. positions that fall in blank regions of the image. The pixel radius required to consider a region blank is a
command-line argument. This can operate over a list of visits/ccds or tracts/patches.
The code expects a FITS file and will output a new FITS file.
"""
import sys, os, re
import argparse
import numpy as np
import pyfits
import lsst.daf.persistence as dafPer
import lsst.afw.table as afwTable
import lsst.afw.geom as afwGeom
import lsst.afw.image as afwImage
import hsc.tools.bick.utils as hscUtil
def loadRaDec(data):
"""
loads ra and dec from a fits file and puts them in a basic schema.
this grabs only the ra/dec from a fits file and puts them in an lsst.afw.table
schema that the pipeline can match against
"""
ras = data['ra']
decs = data['dec']
    try:
        ids = data['ID']
    except KeyError:
        ids = range(len(data))
#turns ra/dec into basic schema and return the schema
schema = afwTable.SourceTable.makeMinimalSchema()
table = afwTable.SourceTable.make(schema)
scat = afwTable.SourceCatalog(table)
for i,(ra,dec,ident) in enumerate(zip(ras,decs,ids)):
s = scat.addNew()
s.setId(int(ident))
s.setRa(float(ra)*afwGeom.degrees)
s.setDec(float(dec)*afwGeom.degrees)
return scat
def main(rerun, dataIds, fakes, root='/lustre/Subaru/SSP', rad=10):
doCoadd = 'tract' in dataIds[0].keys()
butler = dafPer.Butler(os.path.join(root, "rerun", rerun))
#read in fits file, replace with txt file or anything else
fits = pyfits.open(fakes)
data = fits[1].data
radecCat = loadRaDec(data)
ndata = len(data)
datamask = np.ones(ndata, dtype=bool)
ids = data["ID"] if "ID" in data.names else range(len(data))
idDict = dict(zip(ids, xrange(ndata)))
for dataId in dataIds:
print dataId
try:
sources = butler.get('deepCoadd_src' if doCoadd else 'src',
dataId, immediate=True,
flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
cal_md = butler.get('deepCoadd_md' if doCoadd else 'calexp_md',
dataId, immediate=True)
calexp = butler.get('deepCoadd' if doCoadd else 'calexp',
dataId, immediate=True)
except:
print "skipping", dataId
continue
if False:
matches = afwTable.matchRaDec(sources, radecCat,
3.3*afwGeom.arcseconds)
for (src, fake, d) in matches:
datamask[idDict[fake.getId()]] = False
msk = calexp.getMaskedImage().getMask()
detected = msk.clone()
detected &= msk.getPlaneBitMask("DETECTED")
wcs = calexp.getWcs()
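        # For each candidate position: map sky -> pixel, grow a
        # (2*rad+1)-pixel box around it, clip it to the image, and reject the
        # position if any DETECTED bits fall inside the box.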
count, good_count = 0, 0
for i_d, datum in enumerate(radecCat):
pixCoord = afwGeom.Point2I(wcs.skyToPixel(datum.getCoord()))
pixBox = afwGeom.BoxI(pixCoord,afwGeom.Extent2I(1,1))
pixBox.grow(rad)
pixBox.clip(calexp.getBBox(afwImage.PARENT))
if pixBox.isEmpty():
continue
else:
count += 1
subMask = afwImage.MaskU(detected, pixBox, afwImage.PARENT)
if sum(subMask.getArray().ravel()) != 0:
datamask[i_d] = False
else:
good_count += 1
print count, good_count
newdata = data[datamask]
print ndata, len(newdata)
hdu = pyfits.BinTableHDU(newdata)
hdu.writeto('blank_sources.fits', clobber=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('rerun')
parser.add_argument('fakes')
parser.add_argument("visits", help="visits or tracts")
parser.add_argument("ccds", help="CCDS or patches (for coadds)")
parser.add_argument("-f", "--filt",
default=None, help="filter, only set for tract/patches")
parser.add_argument("-R", '--root', default="/lustre/Subaru/SSP")
parser.add_argument('-r', '--radius',
type=int, default=20, help='pixel radius to avoid')
args = parser.parse_args()
visits = hscUtil.idSplit(args.visits)
ccds = hscUtil.idSplit(args.ccds)
if args.filt is None:
dataIds = [{'visit':v, 'ccd':c} for c in ccds for v in visits]
else:
dataIds = [{'tract':t, 'patch':p, 'filter':args.filt}
for p in ccds for t in visits]
main(args.rerun, dataIds, args.fakes, root=args.root, rad=args.radius)
|
HSC-Users/hscTools
|
clackner/bin/getBlankSources.py
|
Python
|
gpl-3.0
| 4,777
|
[
"VisIt"
] |
9a81ccaeefb481aed1a0ca614503b5163ad38ec97ff23127edef8416e14c08e6
|
"""
The main client API you'll be working with most often. You'll need to
configure a dropbox.session.DropboxSession for this to work, but otherwise
it's fairly self-explanatory.
Before you can begin making requests to the dropbox API, you have to
authenticate your application with Dropbox and get the user to
authorize your application to use dropbox on his behalf. A typical
program, from the initial imports to making a simple request (``account_info``),
looks like this:
.. code-block:: python
# Include the Dropbox SDK libraries
from dropbox import client, rest, session
# Get your app key and secret from the Dropbox developer website
APP_KEY = 'INSERT_APP_KEY_HERE'
APP_SECRET = 'INSERT_SECRET_HERE'
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
ACCESS_TYPE = 'INSERT_ACCESS_TYPE_HERE'
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
# Make the user sign in and authorize this token
print "url:", url
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
# This will fail if the user didn't visit the above URL and hit 'Allow'
access_token = sess.obtain_access_token(request_token)
client = client.DropboxClient(sess)
print "linked account:", client.account_info()
"""
from __future__ import absolute_import
import re
import os
from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient
def format_path(path):
"""Normalize path for use with the Dropbox API.
This function turns multiple adjacent slashes into single
slashes, then ensures that there's a leading slash but
not a trailing slash.
"""
if not path:
return path
path = re.sub(r'/+', '/', path)
if path == '/':
return (u"" if isinstance(path, unicode) else "")
else:
return '/' + path.strip('/')
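# A few illustrative inputs (not from the original source):
#   format_path("a//b///c") -> "/a/b/c"
#   format_path("/a/b/")    -> "/a/b"
#   format_path("/")        -> ""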
class DropboxClient(object):
"""
The main access point of doing REST calls on Dropbox. You should
first create and configure a dropbox.session.DropboxSession object,
and then pass it into DropboxClient's constructor. DropboxClient
then does all the work of properly calling each API method
with the correct OAuth authentication.
You should be aware that any of these methods can raise a
rest.ErrorResponse exception if the server returns a non-200
or invalid HTTP response. Note that a 401 return status at any
point indicates that the user needs to be reauthenticated.
"""
def __init__(self, session, rest_client=RESTClient):
"""Initialize the DropboxClient object.
Args:
``session``: A dropbox.session.DropboxSession object to use for making requests.
``rest_client``: A dropbox.rest.RESTClient-like object to use for making requests. [optional]
"""
self.session = session
self.rest_client = rest_client
def request(self, target, params=None, method='POST', content_server=False):
"""Make an HTTP request to a target API method.
This is an internal method used to properly craft the url, headers, and
params for a Dropbox API request. It is exposed for you in case you
        need to craft other API calls not in this library or if you want to debug it.
Args:
- ``target``: The target URL with leading slash (e.g. '/files')
- ``params``: A dictionary of parameters to add to the request
- ``method``: An HTTP method (e.g. 'GET' or 'POST')
- ``content_server``: A boolean indicating whether the request is to the
API content server, for example to fetch the contents of a file
rather than its metadata.
Returns:
- A tuple of (url, params, headers) that should be used to make the request.
OAuth authentication information will be added as needed within these fields.
"""
assert method in ['GET','POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
if params is None:
params = {}
host = self.session.API_CONTENT_HOST if content_server else self.session.API_HOST
base = self.session.build_url(host, target)
headers, params = self.session.build_access_headers(method, base, params)
if method in ('GET', 'PUT'):
url = self.session.build_url(host, target, params)
else:
url = self.session.build_url(host, target)
return url, params, headers
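    # Hypothetical use of request() for an endpoint this SDK doesn't wrap:
    #   url, params, headers = client.request("/files/dropbox/x.txt",
    #                                         method='GET', content_server=True)
    #   raw = client.rest_client.GET(url, headers)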
def account_info(self):
"""Retrieve information about the user's account.
Returns:
- A dictionary containing account information.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#account-info
"""
url, params, headers = self.request("/account/info", method='GET')
return self.rest_client.GET(url, headers)
def get_chunked_uploader(self, file_obj, length):
"""Creates a ChunkedUploader to upload the given file-like object.
Args:
- ``file_obj``: The file-like object which is the source of the data
being uploaded.
- ``length``: The number of bytes to upload.
The expected use of this function is as follows:
.. code-block:: python
bigFile = open("data.txt", 'rb')
uploader = myclient.get_chunked_uploader(bigFile, size)
print "uploading: ", size
while uploader.offset < size:
try:
upload = uploader.upload_chunked()
except rest.ErrorResponse, e:
# perform error handling and retry logic
pass
The SDK leaves the error handling and retry logic to the developer
to implement, as the exact requirements will depend on the application
involved.
"""
return DropboxClient.ChunkedUploader(self, file_obj, length)
class ChunkedUploader(object):
"""Contains the logic around a chunked upload, which uploads a
large file to Dropbox via the /chunked_upload endpoint
"""
def __init__(self, client, file_obj, length):
self.client = client
self.offset = 0
self.upload_id = None
self.last_block = None
self.file_obj = file_obj
self.target_length = length
def upload_chunked(self, chunk_size = 4 * 1024 * 1024):
"""Uploads data from this ChunkedUploader's file_obj in chunks, until
an error occurs. Throws an exception when an error occurs, and can
be called again to resume the upload.
Args:
- ``chunk_size``: The number of bytes to put in each chunk. [default 4 MB]
"""
while self.offset < self.target_length:
next_chunk_size = min(chunk_size, self.target_length - self.offset)
if self.last_block is None:
self.last_block = self.file_obj.read(next_chunk_size)
try:
(self.offset, self.upload_id) = self.client.upload_chunk(StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
self.last_block = None
except ErrorResponse, e:
reply = e.body
if "offset" in reply and reply['offset'] != 0:
if reply['offset'] > self.offset:
self.last_block = None
self.offset = reply['offset']
def finish(self, path, overwrite=False, parent_rev=None):
"""Commits the bytes uploaded by this ChunkedUploader to a file
in the users dropbox.
Args:
- ``path``: The full path of the file in the Dropbox.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
"""
path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))
params = dict(
overwrite = bool(overwrite),
upload_id = self.upload_id
)
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.client.request(path, params, content_server=True)
return self.client.rest_client.POST(url, params, headers)
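# A minimal end-to-end sketch of the chunked-upload flow (hedged
# example, not part of the original SDK; assumes `client` is a linked
# DropboxClient and 'big-file.bin' exists locally):
#
#   big = open('big-file.bin', 'rb')
#   size = os.path.getsize('big-file.bin')
#   uploader = client.get_chunked_uploader(big, size)
#   while uploader.offset < size:
#       try:
#           uploader.upload_chunked()
#       except ErrorResponse, e:
#           pass  # inspect e and decide whether to retry or abort
#   metadata = uploader.finish('/big-file.bin')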
def upload_chunk(self, file_obj, length, offset=0, upload_id=None):
"""Uploads a single chunk of data from the given file like object. The majority of users
should use the ChunkedUploader object, which provides a simpler interface to the
chunked_upload API endpoint.
Args:
- ``file_obj``: The source of the data to upload
- ``length``: The number of bytes to upload in one chunk.
- ``offset``: The byte offset to which this chunk corresponds in the
original file; 0 when starting a new upload. [default 0]
- ``upload_id``: The upload identifier returned for a previous chunk
of the same file, or None to begin a new chunked upload. [optional]
Returns:
- The reply from the server, as a dictionary
"""
params = dict()
if upload_id:
params['upload_id'] = upload_id
params['offset'] = offset
url, ignored_params, headers = self.request("/chunked_upload", params, method='PUT', content_server=True)
reply = self.rest_client.PUT(url, file_obj, headers)
return reply['offset'], reply['upload_id']
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
"""Upload a file.
A typical use case would be as follows:
.. code-block:: python
f = open('working-draft.txt')
response = client.put_file('/magnum-opus.txt', f)
print "uploaded:", response
which would return the metadata of the uploaded file, similar to:
.. code-block:: python
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
Args:
- ``full_path``: The full path to upload the file to, *including the file name*.
If the destination directory does not yet exist, it will be created.
- ``file_obj``: A file-like object to upload. If you would like, you can pass a string as file_obj.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
Returns:
- A dictionary containing the metadata of the newly uploaded file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#files-put
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 503: User over quota
Note: In Python versions below 2.6, httplib doesn't handle file-like objects.
In that case, this code will read the entire file into memory (!).
"""
path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
params = {
'overwrite': bool(overwrite),
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request(path, params, method='PUT', content_server=True)
return self.rest_client.PUT(url, file_obj, headers)
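# A hedged sketch of the parent_rev flow described above (example only):
# read the current rev first, then upload against it so the server can
# detect conflicting edits instead of silently clobbering them.
#
#   meta = client.metadata('/magnum-opus.txt')
#   f = open('working-draft.txt', 'rb')
#   new_meta = client.put_file('/magnum-opus.txt', f,
#                              parent_rev=meta['rev'])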
def get_file(self, from_path, rev=None):
"""Download a file.
Unlike most other calls, get_file returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
A typical usage looks like this:
.. code-block:: python
out = open('magnum-opus.txt', 'w')
f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
out.write(f.read())
which would download the file ``magnum-opus.txt`` and write the contents into
the file ``magnum-opus.txt`` on the local filesystem.
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
path = "/files/%s%s" % (self.session.root, format_path(from_path))
params = {}
if rev is not None:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
"""Download a file alongwith its metadata.
Acts as a thin wrapper around get_file() (see get_file() comments for
more details)
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file (see
https://www.dropbox.com/developers/reference/api#metadata for details).
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
file_res = self.get_file(from_path, rev)
metadata = DropboxClient.__parse_metadata_as_dict(file_res)
return file_res, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
"""Parses file metadata from a raw dropbox HTTP response, raising a
dropbox.rest.ErrorResponse if parsing fails.
"""
metadata = None
for header, header_val in dropbox_raw_response.getheaders():
if header.lower() == 'x-dropbox-metadata':
try:
metadata = json.loads(header_val)
except ValueError:
raise ErrorResponse(dropbox_raw_response)
if not metadata: raise ErrorResponse(dropbox_raw_response)
return metadata
def delta(self, cursor=None):
"""A way of letting you keep up with changes to files and folders in a
user's Dropbox. You can periodically call delta() to get a list of "delta
entries", which are instructions on how to update your local state to
match the server's state.
Arguments:
- ``cursor``: On the first call, omit this argument (or pass in ``None``). On
subsequent calls, pass in the ``cursor`` string returned by the previous
call.
Returns: A dict with four fields.
- ``entries``: A list of "delta entries" (described below)
- ``reset``: If ``True``, you should reset your local state to be an empty folder
before processing the list of delta entries. This is ``True`` only
in rare situations.
- ``cursor``: A string that is used to keep track of your current state.
On the next call to delta(), pass in this value to return entries
that were recorded since the cursor was returned.
- ``has_more``: If ``True``, then there are more entries available; you can
call delta() again immediately to retrieve those entries. If ``False``,
then wait at least 5 minutes (preferably longer) before checking again.
Delta Entries: Each entry is a 2-item list of one of the following forms:
- [*path*, *metadata*]: Indicates that there is a file/folder at the given
path. You should add the entry to your local state. (The *metadata*
value is the same as what would be returned by the ``metadata()`` call.)
- If the new entry includes parent folders that don't yet exist in your
local state, create those parent folders in your local state. You
will eventually get entries for those parent folders.
- If the new entry is a file, replace whatever your local state has at
*path* with the new entry.
- If the new entry is a folder, check what your local state has at
*path*. If it's a file, replace it with the new entry. If it's a
folder, apply the new *metadata* to the folder, but do not modify
the folder's children.
- [*path*, ``None``]: Indicates that there is no file/folder at the *path* on
Dropbox. To update your local state to match, delete whatever is at *path*,
including any children (you will sometimes also get "delete" delta entries
for the children, but this is not guaranteed). If your local state doesn't
have anything at *path*, ignore this entry.
Remember: Dropbox treats file names in a case-insensitive but case-preserving
way. To facilitate this, the *path* strings above are lower-cased versions of
the actual path. The *metadata* dicts have the original, case-preserved path.
"""
path = "/delta"
params = {}
if cursor is not None:
params['cursor'] = cursor
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
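# A hedged sketch of the delta-processing loop described above
# (`local_state` is a hypothetical lower-cased-path -> metadata dict,
# not part of the SDK):
#
#   cursor = None
#   while True:
#       result = client.delta(cursor)
#       if result['reset']:
#           local_state.clear()
#       for lc_path, meta in result['entries']:
#           if meta is None:
#               local_state.pop(lc_path, None)  # deleted on Dropbox
#           else:
#               local_state[lc_path] = meta     # added or updated
#       cursor = result['cursor']
#       if not result['has_more']:
#           break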
def create_copy_ref(self, from_path):
"""Creates and returns a copy ref for a specific file. The copy ref can be
used to instantly copy that file to the Dropbox of another account.
Args:
- ``path``: The path to the file for a copy ref to be created on.
Returns:
- A dictionary that looks like the following example:
``{"expires":"Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref":"z1X6ATl6aWtzOGq0c3g5Ng"}``
"""
path = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {}, method='GET')
return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
"""Adds the file referenced by the copy ref to the specified path
Args:
- ``copy_ref``: A copy ref string that was returned from a create_copy_ref call.
The copy_ref can be created from any other Dropbox account, or from the same account.
- ``path``: The path to where the file will be created.
Returns:
- A dictionary containing the metadata of the new copy of the file.
"""
path = "/fileops/copy"
params = {'from_copy_ref': copy_ref,
'to_path': format_path(to_path),
'root': self.session.root}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
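# A hedged sketch of the copy-ref flow (assuming two linked clients,
# `client_a` and `client_b`, which are not defined in this module):
#
#   ref = client_a.create_copy_ref('/photos/cat.jpg')['copy_ref']
#   metadata = client_b.add_copy_ref(ref, '/shared/cat.jpg')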
def file_copy(self, from_path, to_path):
"""Copy a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be copied.
- ``to_path``: The destination path of the file or folder to be copied.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path, this copy will be renamed to
be unique.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-copy
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root,
'from_path': format_path(from_path),
'to_path': format_path(to_path),
}
url, params, headers = self.request("/fileops/copy", params)
return self.rest_client.POST(url, params, headers)
def file_create_folder(self, path):
"""Create a folder.
Args:
- ``path``: The path of the new folder.
Returns:
- A dictionary containing the metadata of the newly created folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-create-folder
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 403: A folder at that path already exists.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/create_folder", params)
return self.rest_client.POST(url, params, headers)
def file_delete(self, path):
"""Delete a file or folder.
Args:
- ``path``: The path of the file or folder.
Returns:
- A dictionary containing the metadata of the just deleted file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-delete
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/delete", params)
return self.rest_client.POST(url, params, headers)
def file_move(self, from_path, to_path):
"""Move a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be moved.
- ``to_path``: The destination path of the file or folder to be moved.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path, this file or folder will be renamed to
be unique.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-move
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root, 'from_path': format_path(from_path), 'to_path': format_path(to_path)}
url, params, headers = self.request("/fileops/move", params)
return self.rest_client.POST(url, params, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None, rev=None, include_deleted=False):
"""Retrieve metadata for a file or folder.
A typical use would be:
.. code-block:: python
folder_metadata = client.metadata('/')
print "metadata:", folder_metadata
which would return the metadata of the root directory. This
will look something like:
.. code-block:: python
{
'bytes': 0,
'contents': [
{
'bytes': 0,
'icon': 'folder',
'is_dir': True,
'modified': 'Thu, 25 Aug 2011 00:03:15 +0000',
'path': '/Sample Folder',
'rev': '803beb471',
'revision': 8,
'root': 'dropbox',
'size': '0 bytes',
'thumb_exists': False
},
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
],
'hash': 'efdac89c4da886a9cece1927e6c22977',
'icon': 'folder',
'is_dir': True,
'path': '/',
'root': 'app_folder',
'size': '0 bytes',
'thumb_exists': False
}
In this example, the root directory contains two things: ``Sample Folder``,
which is a folder, and ``/magnum-opus.txt``, which is a text file 77 bytes long.
Args:
- ``path``: The path to the file or folder.
- ``list``: Whether to list all contained files (only applies when
path refers to a folder).
- ``file_limit``: The maximum number of file entries to return within
a folder. If the number of files in the directory exceeds this
limit, an exception is raised. The server will return at max
25,000 files within a folder.
- ``hash``: Every directory listing has a hash parameter attached that
can then be passed back into this function later to save on
bandwidth. Rather than returning an unchanged folder's contents,
the server will instead return a 304.
- ``rev``: The revision of the file to retrieve the metadata for. [optional]
This parameter only applies for files. If omitted, you'll receive
the most recent revision metadata.
- ``include_deleted``: Whether to include deleted files in the listing. [default False]
Returns:
- A dictionary containing the metadata of the file or folder
(and contained files if appropriate).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#metadata
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 304: Current directory hash matches hash parameters, so contents are unchanged.
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given path.
- 406: Too many file entries to return.
"""
path = "/metadata/%s%s" % (self.session.root, format_path(path))
params = {'file_limit': file_limit,
'list': 'true',
'include_deleted': include_deleted,
}
if not list:
params['list'] = 'false'
if hash is not None:
params['hash'] = hash
if rev:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
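# A hedged sketch of the hash-based caching described above: reuse a
# previously fetched listing when the server answers 304 (example only;
# `cached_listing` is a hypothetical dict kept by the caller):
#
#   try:
#       cached_listing = client.metadata('/', hash=cached_listing.get('hash'))
#   except ErrorResponse, e:
#       if e.status != 304:
#           raise
#       # 304: folder unchanged, keep using cached_listing as-is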
def thumbnail(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image.
Unlike most other calls, thumbnail returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size.
At this time, 'small', 'medium', and 'large' are
officially supported sizes (32x32, 64x64, and 128x128
respectively), though others may be available. Check
https://www.dropbox.com/developers/reference/api#thumbnails for
more details.
- ``format``: The image format of the thumbnail, either 'JPEG' or 'PNG'. [default 'JPEG']
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
"""
assert format in ['JPEG', 'PNG'], "expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format
path = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {'size': size, 'format': format}, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image alongwith its metadata.
Acts as a thin wrapper around thumbnail() (see thumbnail() comments for
more details)
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size. See thumbnail()
for details.
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file whose thumbnail
was downloaded (see https://www.dropbox.com/developers/reference/api#metadata
for details).
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
- 200: Request was okay but response was malformed in some way.
"""
thumbnail_res = self.thumbnail(from_path, size, format)
metadata = DropboxClient.__parse_metadata_as_dict(thumbnail_res)
return thumbnail_res, metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
"""Search directory for filenames matching query.
Args:
- ``path``: The directory to search within.
- ``query``: The query to search on (minimum 3 characters).
- ``file_limit``: The maximum number of file entries to return within a folder.
The server will return at max 1,000 files.
- ``include_deleted``: Whether to include deleted files in search results.
Returns:
- A list of the metadata of all matching files (up to
file_limit entries). For a detailed description of what
this call returns, visit:
https://www.dropbox.com/developers/reference/api#search
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
"""
path = "/search/%s%s" % (self.session.root, format_path(path))
params = {
'query': query,
'file_limit': file_limit,
'include_deleted': include_deleted,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def revisions(self, path, rev_limit=1000):
"""Retrieve revisions of a file.
Args:
- ``path``: The file to fetch revisions for. Note that revisions
are not available for folders.
- ``rev_limit``: The maximum number of revisions to return.
The server will return at max 1,000 revisions.
Returns:
- A list of the metadata of all revisions of the file (up to rev_limit entries).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#revisions
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No revisions were found at the given path.
"""
path = "/revisions/%s%s" % (self.session.root, format_path(path))
params = {
'rev_limit': rev_limit,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def restore(self, path, rev):
"""Restore a file to a previous revision.
Args:
- ``path``: The file to restore. Note that folders can't be restored.
- ``rev``: A previous rev value of the file to be restored to.
Returns:
- A dictionary containing the metadata of the newly restored file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#restore
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given revision.
"""
path = "/restore/%s%s" % (self.session.root, format_path(path))
params = {
'rev': rev,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def media(self, path):
"""Get a temporary unauthenticated URL for a media file.
All of Dropbox's API methods require OAuth, which may cause problems in
situations where an application expects to be able to hit a URL multiple times
(for example, a media player seeking around a video file). This method
creates a time-limited URL that can be accessed without any authentication,
and returns that to you, along with an expiration time.
Args:
- ``path``: The file to return a URL for. Folders are not supported.
Returns:
- A dictionary that looks like the following example:
``{'url': 'https://dl.dropbox.com/0/view/wvxv1fw6on24qw7/file.mov', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#media
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
"""
path = "/media/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
def share(self, path):
"""Create a shareable link to a file or folder.
Shareable links created on Dropbox are time-limited, but don't require any
authentication, so they can be given out freely. The time limit should allow
at least a day of shareability, though users have the ability to disable
a link from their account if they like.
Args:
- ``path``: The file or folder to share.
Returns:
- A dictionary that looks like the following example:
``{'url': 'http://www.dropbox.com/s/m/a2mbDa2', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#shares
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
"""
path = "/shares/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
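# A hedged usage sketch for the two link helpers above (example only):
#
#   media = client.media('/video.mov')
#   print "stream from:", media['url'], "until", media['expires']
#   link = client.share('/video.mov')
#   print "share this:", link['url']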
|
azumimuo/family-xbmc-addon
|
script.xbmcbackup/resources/lib/dropbox/client.py
|
Python
|
gpl-2.0
| 40,015
|
[
"VisIt"
] |
7368922a2e56f3f239ae2baae7ed76f66b9e497b88969d27332c25ffe647f72a
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
import chigger
camera = vtk.vtkCamera()
camera.SetViewUp(-0.01297019406812408, 0.87867984226827, 0.4772352762079132)
camera.SetPosition(10.331000991784688, -5.473421359648077, 10.483371124667542)
camera.SetFocalPoint(0.16947273724857123, 0.07124492441302266, -0.0015694043706061533)
reader = chigger.exodus.ExodusReader('../../input/mug_blocks_out.e', boundary=['bottom', 'top'])
mug = chigger.exodus.ExodusResult(reader, block=None, boundary=['1'], variable='convected', cmap='coolwarm', camera=camera)
window = chigger.RenderWindow(mug, size=[300,300], test=True)
window.write('boundary_numeric.png')
#for key, value in reader.getBlockInformation().items():
#    print(key, value)
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/exodus/blocks/boundary_numeric.py
|
Python
|
lgpl-2.1
| 1,081
|
[
"MOOSE",
"VTK"
] |
59e113629ab850b693b2217b403bec24806a4c152cb3682628bc03da32c470d6
|
import unittest
import os
import json
import scipy
from io import open
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononDosPlotter, PhononBSPlotter, ThermoPlotter
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class PhononDosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 2)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Na", "Cl"]:
self.assertIn(el, d)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.add_dos("Total", self.dos)
self.plotter.get_plot(units="mev")
self.plotter_nostack.add_dos("Total", self.dos)
self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_phonon_bandstructure.json"), "r") as f:
d = json.loads(f.read())
self.bs = PhononBandStructureSymmLine.from_dict(d)
self.plotter = PhononBSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 51,
"wrong number of distances in the first branch")
self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 4,
"wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
204, "wrong number of distances")
self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][4], "Y",
"wrong tick label")
self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
8, "wrong number of tick labels")
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.get_plot(units="mev")
def test_plot_compare(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.plot_compare(self.plotter, units="mev")
class ThermoPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = ThermoPlotter(self.dos, self.dos.structure)
def test_plot_functions(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.plot_cv(5, 100, 5, show=False)
self.plotter.plot_entropy(5, 100, 5, show=False)
self.plotter.plot_internal_energy(5, 100, 5, show=False)
self.plotter.plot_helmholtz_free_energy(5, 100, 5, show=False)
self.plotter.plot_thermodynamic_properties(5, 100, 5, show=False, fig_close=True)
if __name__ == "__main__":
unittest.main()
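# A hedged standalone sketch (not part of this test module): the same
# plotters work outside unittest, e.g. to save a DOS figure to disk.
#
#   with open("NaCl_complete_ph_dos.json") as f:
#       dos = CompletePhononDos.from_dict(json.load(f))
#   plotter = PhononDosPlotter(sigma=0.2)
#   plotter.add_dos("Total", dos)
#   plotter.save_plot("NaCl_ph_dos.png", img_format="png", units="mev")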
|
mbkumar/pymatgen
|
pymatgen/phonon/tests/test_plotter.py
|
Python
|
mit
| 3,873
|
[
"pymatgen"
] |
f0fe99e1fb801b6553449e65bceb196690a1200413f435530d40b85a8e47b7a1
|
#
# @file TestPriority.py
# @brief SBML Priority unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestPriority.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestPriority(unittest.TestCase):
global P
P = None
def setUp(self):
self.P = libsbml.Priority(3,1)
if (self.P == None):
pass
pass
def tearDown(self):
_dummyList = [ self.P ]; _dummyList[:] = []; del _dummyList
pass
def test_Priority_create(self):
self.assert_( self.P.getTypeCode() == libsbml.SBML_PRIORITY )
self.assert_( self.P.getMetaId() == "" )
self.assert_( self.P.getNotes() == None )
self.assert_( self.P.getAnnotation() == None )
self.assert_( self.P.getMath() == None )
pass
def test_Priority_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(3,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.Priority(sbmlns)
self.assert_( object.getTypeCode() == libsbml.SBML_PRIORITY )
self.assert_( object.getMetaId() == "" )
self.assert_( object.getNotes() == None )
self.assert_( object.getAnnotation() == None )
self.assert_( object.getLevel() == 3 )
self.assert_( object.getVersion() == 1 )
self.assert_( object.getNamespaces() != None )
self.assert_( object.getNamespaces().getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_Priority_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_Priority_setMath(self):
math = libsbml.parseFormula("lambda(x, x^3)")
self.P.setMath(math)
math1 = self.P.getMath()
self.assert_( math1 != None )
formula = libsbml.formulaToString(math1)
self.assert_( formula != None )
self.assert_(( "lambda(x, x^3)" == formula ))
self.assert_( self.P.getMath() != math )
self.assertEqual( True, self.P.isSetMath() )
self.P.setMath(self.P.getMath())
math1 = self.P.getMath()
self.assert_( math1 != None )
formula = libsbml.formulaToString(math1)
self.assert_( formula != None )
self.assert_(( "lambda(x, x^3)" == formula ))
self.P.setMath(None)
self.assertEqual( False, self.P.isSetMath() )
if (self.P.getMath() != None):
pass
pass
def test_Priority_setMath1(self):
math = libsbml.parseFormula("2 * k")
i = self.P.setMath(math)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.P.getMath() != math )
self.assertEqual( True, self.P.isSetMath() )
i = self.P.setMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.P.getMath() == None )
self.assertEqual( False, self.P.isSetMath() )
_dummyList = [ math ]; _dummyList[:] = []; del _dummyList
pass
def test_Priority_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_TIMES)
i = self.P.setMath(math)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.P.isSetMath() )
_dummyList = [ math ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestPriority))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestPriority.py
|
Python
|
gpl-3.0
| 4,795
|
[
"VisIt"
] |
eff3ac58e9c6e94bea7404b356173e4c672daf0fa8378805ebfb61f2ca17e201
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test the button source
# The image to map on the button
r = vtk.vtkJPEGReader()
r.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
r.Update()
t = vtk.vtkTexture()
t.SetInputConnection(r.GetOutputPort())
dims = r.GetOutput().GetDimensions()
d1 = dims[0]
d2 = dims[1]
# The first elliptical button
bs = vtk.vtkEllipticalButtonSource()
bs.SetWidth(2)
bs.SetHeight(1)
bs.SetDepth(0.2)
bs.SetCircumferentialResolution(64)
bs.SetRadialRatio(1.1)
bs.SetShoulderResolution(8)
bs.SetTextureResolution(4)
bs.TwoSidedOn()
bMapper = vtk.vtkPolyDataMapper()
bMapper.SetInputConnection(bs.GetOutputPort())
b1 = vtk.vtkActor()
b1.SetMapper(bMapper)
b1.SetTexture(t)
# The second elliptical button
bs2 = vtk.vtkEllipticalButtonSource()
bs2.SetWidth(2)
bs2.SetHeight(1)
bs2.SetDepth(0.2)
bs2.SetCircumferentialResolution(64)
bs2.SetRadialRatio(1.1)
bs2.SetShoulderResolution(8)
bs2.SetTextureResolution(4)
bs2.TwoSidedOn()
bs2.SetCenter(2, 0, 0)
bs2.SetTextureStyleToFitImage()
bs2.SetTextureDimensions(d1, d2)
b2Mapper = vtk.vtkPolyDataMapper()
b2Mapper.SetInputConnection(bs2.GetOutputPort())
b2 = vtk.vtkActor()
b2.SetMapper(b2Mapper)
b2.SetTexture(t)
# The third rectangular button
bs3 = vtk.vtkRectangularButtonSource()
bs3.SetWidth(1.5)
bs3.SetHeight(0.75)
bs3.SetDepth(0.2)
bs3.TwoSidedOn()
bs3.SetCenter(0, 1, 0)
bs3.SetTextureDimensions(d1, d2)
b3Mapper = vtk.vtkPolyDataMapper()
b3Mapper.SetInputConnection(bs3.GetOutputPort())
b3 = vtk.vtkActor()
b3.SetMapper(b3Mapper)
b3.SetTexture(t)
# The fourth rectangular button
bs4 = vtk.vtkRectangularButtonSource()
bs4.SetWidth(1.5)
bs4.SetHeight(0.75)
bs4.SetDepth(0.2)
bs4.TwoSidedOn()
bs4.SetCenter(2, 1, 0)
bs4.SetTextureStyleToFitImage()
bs4.SetTextureDimensions(d1, d2)
b4Mapper = vtk.vtkPolyDataMapper()
b4Mapper.SetInputConnection(bs4.GetOutputPort())
b4 = vtk.vtkActor()
b4.SetMapper(b4Mapper)
b4.SetTexture(t)
# Create the RenderWindow, Renderer and Interactive Renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(b1)
ren1.AddActor(b2)
ren1.AddActor(b3)
ren1.AddActor(b4)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(250, 150)
renWin.Render()
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
iren.Initialize()
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Sources/Testing/Python/TestButtonSource.py
|
Python
|
gpl-3.0
| 2,447
|
[
"VTK"
] |
5bfda2690e12526ec6c701fce499e0e110a91ae0944eb9bf93eefe7ca310ef75
|
# -*- coding: utf-8 -*-
#
# canu documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 26 18:41:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'canu'
copyright = u'2015, Adam Phillippy, Sergey Koren, Brian Walenz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# Build using the RTD theme, if not on RTD.
# https://read-the-docs.readthedocs.org/en/latest/theme.html
# https://github.com/snide/sphinx_rtd_theme
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [ "/usr/local/lib/python2.7/site-packages", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'canudoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'canu.tex', u'canu Documentation',
u'Adam Phillippy, Sergey Koren, Brian Walenz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'canu', u'canu Documentation',
[u'Adam Phillippy, Sergey Koren, Brian Walenz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'canu', u'canu Documentation',
u'Adam Phillippy, Sergey Koren, Brian Walenz', 'canu', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
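# A hedged note (not part of the generated configuration): with this
# conf.py in documentation/source, the HTML docs would typically be
# built with something like
#
#   sphinx-build -b html documentation/source documentation/build/html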
|
sgblanch/canu
|
documentation/source/conf.py
|
Python
|
gpl-2.0
| 8,725
|
[
"Brian"
] |
57a7acdbd23e77844dedd49bf4927ee48bc83a0a52c70701c4227b675ed9ea7c
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import string
def wrap(string, length):
""" Yield successive length-sized chunks from string.
"""
for i in xrange(0, len(string), length):
yield string[i:i + length]
def phred_score_dict(offset):
""" Creates a dict of phred score values
"""
phred_dict = {}
for letter in string.printable:
phred_dict[letter] = float(ord(letter) - offset)
return phred_dict
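# A small illustrative sketch (not part of the original module): with the
# usual Sanger/Illumina 1.8+ offset of 33, '!' maps to 0.0 and 'I' to 40.0.
#
#   phred33 = phred_score_dict(33)
#   assert phred33['!'] == 0.0
#   assert phred33['I'] == 40.0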
def fastqIterator(handle):
"""This function is adapted from the biopython source code. It therefore has
been distributed under their own license:
Biopython License Agreement
Permission to use, copy, modify, and distribute this software and its
documentation with or without modifications and for any purpose and
without fee is hereby granted, provided that any copyright notices
appear in all copies and that both those copyright notices and this
permission notice appear in supporting documentation, and that the
names of the contributors or copyright holders not be used in
advertising or publicity pertaining to distribution of the software
without specific prior permission.
THE CONTRIBUTORS AND COPYRIGHT HOLDERS OF THIS SOFTWARE DISCLAIM ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT
OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
"""
# We need to call handle.readline() at least four times per record,
# so we'll save a property look up each time:
handle_readline = handle.readline
# Skip any text before the first record (e.g. blank lines, comments?)
while True:
line = handle_readline().decode("utf-8")
if not line:
return # Premature end of file, or just empty?
if line[0] == "@":
break
if isinstance(line[0], int):
raise ValueError("Is this handle in binary mode not text mode?")
while line:
if line[0] != "@":
raise ValueError(
"Records in Fastq files should start with '@' character")
title_line = line[1:].rstrip()
# Will now be at least one line of quality data - in most FASTQ files
# just one line! We therefore use string concatenation (if needed)
# rather than using the "".join(...) trick just in case it is multiline:
seq_string = handle_readline().decode("utf-8").rstrip()
# There may now be more sequence lines, or the "+" quality marker line:
while True:
line = handle_readline().decode("utf-8")
if not line:
raise ValueError("End of file without quality information.")
if line[0] == "+":
# The title here is optional, but if present must match!
second_title = line[1:].rstrip()
if second_title and second_title != title_line:
raise ValueError("Sequence and quality captions differ.")
break
seq_string += line.rstrip() # removes trailing newlines
# This is going to slow things down a little, but assuming
# this isn't allowed we should try and catch it here:
if " " in seq_string or "\t" in seq_string:
raise ValueError("Whitespace is not allowed in the sequence.")
seq_len = len(seq_string)
# Will now be at least one line of quality data...
quality_string = handle_readline().decode("utf-8").rstrip()
# There may now be more quality data, or another sequence, or EOF
while True:
line = handle_readline().decode("utf-8")
if not line:
break # end of file
if line[0] == "@":
# This COULD be the start of a new sequence. However, it MAY just
# be a line of quality data which starts with a "@" character. We
# should be able to check this by looking at the sequence length
# and the amount of quality data found so far.
if len(quality_string) >= seq_len:
# We expect it to be equal if this is the start of a new record.
# If the quality data is longer, we'll raise an error below.
break
# Continue - its just some (more) quality data.
quality_string += line.rstrip()
if seq_len != len(quality_string):
raise ValueError("Lengths of sequence and quality values differs "
" for %s (%i and %i)."
% (title_line, seq_len, len(quality_string)))
# Convert into a fastq record
record = Fastq(title_line, seq_string, quality_string)
# Return the record and then continue...
yield record
return  # end of iteration (avoids PEP 479 issues on newer Pythons)
def fastqWriter(record, handle):
""" Simple fastq writer.
"""
out = []
# Add @ to header
out.append("@{0}".format(record.id))
# Add sequence
out.append(record.seq)
# Add +
out.append("+")
# Add qual
out.append(record.qual_str)
# Write to handle
out_line = "\n".join(out) + "\n"
handle.write(out_line.encode("utf-8"))
return 0
class Fastq:
# Class to hold a single fastq record
def __init__(self, title_line, seq_string, quality_string):
self.id = title_line
self.seq = seq_string
self.qual_str = quality_string
self.qual_prob = None
def qual_to_prob(self, base_prob_precompute):
""" Converts the quality string to a list of base probabilities.
"""
self.qual_prob = [base_prob_precompute[x] for x in self.qual_str]
return 0
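# A hedged round-trip sketch (example only). Both handles are opened in
# binary mode because fastqIterator() decodes and fastqWriter() encodes:
#
#   with open("in.fastq", "rb") as in_h, open("out.fastq", "wb") as out_h:
#       for record in fastqIterator(in_h):
#           fastqWriter(record, out_h)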
|
edm1/error-aware-demultiplexer
|
src/fastqparser.py
|
Python
|
mit
| 7,235
|
[
"Biopython"
] |
6a1eb247c9503ec1699891f3393a4d7d213c7746e5d7b6cdb25d966456001923
|
########################################################################
# $HeadURL$
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities import Time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.Core.Utilities.ProcessMonitor import ProcessMonitor
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft
import os, time
class Watchdog:
#############################################################################
def __init__( self, pid, exeThread, spObject, jobCPUtime, memoryLimit = 0, systemFlag = 'linux2.4' ):
""" Constructor, takes system flag as argument.
"""
self.log = gLogger.getSubLogger( "Watchdog" )
self.systemFlag = systemFlag
self.exeThread = exeThread
self.wrapperPID = pid
self.appPID = self.exeThread.getCurrentPID()
self.spObject = spObject
self.jobCPUtime = jobCPUtime
self.memoryLimit = memoryLimit
self.calibration = 0
self.initialValues = {}
self.parameters = {}
self.peekFailCount = 0
self.peekRetry = 5
self.processMonitor = ProcessMonitor()
self.checkError = ''
self.currentStats = {}
self.initialized = False
self.count = 0
#############################################################################
def initialize( self, loops = 0 ):
""" Watchdog initialization.
"""
if self.initialized:
self.log.info( 'Watchdog already initialized' )
return S_OK()
else:
self.initialized = True
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
return S_ERROR( 'Can not get the DIRAC Setup value' )
wms_instance = getSystemInstance( "WorkloadManagement" )
if not wms_instance:
return S_ERROR( 'Can not get the WorkloadManagement system instance' )
self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance
self.maxcount = loops
self.log.verbose( 'Watchdog initialization' )
self.log.info( 'Attempting to Initialize Watchdog for: %s' % ( self.systemFlag ) )
# Test control flags
self.testWallClock = gConfig.getValue( self.section + '/CheckWallClockFlag', 1 )
self.testDiskSpace = gConfig.getValue( self.section + '/CheckDiskSpaceFlag', 1 )
self.testLoadAvg = gConfig.getValue( self.section + '/CheckLoadAvgFlag', 1 )
self.testCPUConsumed = gConfig.getValue( self.section + '/CheckCPUConsumedFlag', 1 )
self.testCPULimit = gConfig.getValue( self.section + '/CheckCPULimitFlag', 0 )
self.testMemoryLimit = gConfig.getValue( self.section + '/CheckMemoryLimitFlag', 0 )
self.testTimeLeft = gConfig.getValue( self.section + '/CheckTimeLeftFlag', 1 )
# Other parameters
self.pollingTime = gConfig.getValue( self.section + '/PollingTime', 10 ) # 10 seconds
self.checkingTime = gConfig.getValue( self.section + '/CheckingTime', 30 * 60 ) # 30 minute period
self.minCheckingTime = gConfig.getValue( self.section + '/MinCheckingTime', 20 * 60 ) # 20 mins
    self.maxWallClockTime = gConfig.getValue( self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60 ) # i.e. 3 days
self.jobPeekFlag = gConfig.getValue( self.section + '/JobPeekFlag', 1 ) # on / off
self.minDiskSpace = gConfig.getValue( self.section + '/MinDiskSpace', 10 ) # MB
self.loadAvgLimit = gConfig.getValue( self.section + '/LoadAverageLimit', 1000 ) # > 1000 and jobs killed
    self.sampleCPUTime = gConfig.getValue( self.section + '/CPUSampleTime', 30 * 60 ) # i.e. a 30 minute sample window
self.jobCPUMargin = gConfig.getValue( self.section + '/JobCPULimitMargin', 20 ) # %age buffer before killing job
self.minCPUWallClockRatio = gConfig.getValue( self.section + '/MinCPUWallClockRatio', 5 ) # ratio %age
    self.nullCPULimit = gConfig.getValue( self.section + '/NullCPUCountLimit', 5 ) # kill the job after 5 consecutive samples report null CPU consumption
self.checkCount = 0
self.nullCPUCount = 0
if self.checkingTime < self.minCheckingTime:
self.log.info( 'Requested CheckingTime of %s setting to %s seconds (minimum)' % ( self.checkingTime, self.minCheckingTime ) )
self.checkingTime = self.minCheckingTime
# The time left is returned in seconds @ 250 SI00 = 1 HS06,
# the self.checkingTime and self.pollingTime are in seconds,
# thus they need to be multiplied by a large enough factor
self.grossTimeLeftLimit = 10 * self.checkingTime
self.fineTimeLeftLimit = gConfig.getValue( self.section + '/TimeLeftLimit', 150 * self.pollingTime )
self.timeLeftUtil = TimeLeft()
self.timeLeft = 0
self.littleTimeLeft = False
return S_OK()
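  # A hypothetical CS layout (cfg syntax, all values assumed) behind the
  # gConfig.getValue() calls above:
  #
  #   Systems/WorkloadManagement/<instance>/JobWrapper
  #   {
  #     PollingTime = 10
  #     CheckingTime = 1800
  #     MinDiskSpace = 10
  #     JobCPULimitMargin = 20
  #     TimeLeftLimit = 1500
  #   }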
def run( self ):
""" The main watchdog execution method
"""
result = self.initialize()
if not result['OK']:
gLogger.always( 'Can not start watchdog for the following reason' )
gLogger.always( result['Message'] )
return result
try:
while True:
gLogger.debug( 'Starting watchdog loop # %d' % self.count )
start_cycle_time = time.time()
result = self.execute()
exec_cycle_time = time.time() - start_cycle_time
if not result[ 'OK' ]:
gLogger.error( "Watchdog error during execution", result[ 'Message' ] )
break
elif result['Value'] == "Ended":
break
self.count += 1
if exec_cycle_time < self.pollingTime:
time.sleep( self.pollingTime - exec_cycle_time )
return S_OK()
except Exception:
gLogger.exception()
return S_ERROR( 'Exception' )
#############################################################################
def execute( self ):
""" The main agent execution method of the Watchdog.
"""
if not self.exeThread.isAlive():
# print self.parameters
self.__getUsageSummary()
self.log.info( 'Process to monitor has completed, Watchdog will exit.' )
return S_OK( "Ended" )
if self.littleTimeLeft:
      # if enough iterations have passed, query the batch system again
if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
self.checkError = 'Job has reached the CPU limit of the queue'
self.log.error( self.checkError, self.timeLeft )
self.__killRunningThread()
return S_OK()
else:
self.littleTimeLeftCount -= 1
# Note: need to poll regularly to see if the thread is alive
# but only perform checks with a certain frequency
if ( time.time() - self.initialValues['StartTime'] ) > self.checkingTime * self.checkCount:
self.checkCount += 1
result = self.__performChecks()
if not result['OK']:
self.log.warn( 'Problem during recent checks' )
self.log.warn( result['Message'] )
return S_OK()
else:
# self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
return S_OK()
#############################################################################
def __performChecks( self ):
"""The Watchdog checks are performed at a different period to the checking of the
application thread and correspond to the checkingTime.
"""
self.log.verbose( '------------------------------------' )
self.log.verbose( 'Checking loop starts for Watchdog' )
heartBeatDict = {}
msg = ''
result = self.getLoadAverage()
msg += 'LoadAvg: %s ' % ( result['Value'] )
heartBeatDict['LoadAverage'] = result['Value']
if not self.parameters.has_key( 'LoadAverage' ):
self.parameters['LoadAverage'] = []
self.parameters['LoadAverage'].append( result['Value'] )
result = self.getMemoryUsed()
msg += 'MemUsed: %.1f kb ' % ( result['Value'] )
heartBeatDict['MemoryUsed'] = result['Value']
if not self.parameters.has_key( 'MemoryUsed' ):
self.parameters['MemoryUsed'] = []
self.parameters['MemoryUsed'].append( result['Value'] )
result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
if result['OK']:
vsize = result['Value']['Vsize']/1024.
rss = result['Value']['RSS']/1024.
heartBeatDict['Vsize'] = vsize
heartBeatDict['RSS'] = rss
self.parameters.setdefault( 'Vsize', [] )
self.parameters['Vsize'].append( vsize )
self.parameters.setdefault( 'RSS', [] )
self.parameters['RSS'].append( rss )
msg += "Job Vsize: %.1f kb " % vsize
msg += "Job RSS: %.1f kb " % rss
result = self.getDiskSpace()
msg += 'DiskSpace: %.1f MB ' % ( result['Value'] )
if not self.parameters.has_key( 'DiskSpace' ):
self.parameters['DiskSpace'] = []
self.parameters['DiskSpace'].append( result['Value'] )
heartBeatDict['AvailableDiskSpace'] = result['Value']
result = self.__getCPU()
msg += 'CPU: %s (h:m:s) ' % ( result['Value'] )
if not self.parameters.has_key( 'CPUConsumed' ):
self.parameters['CPUConsumed'] = []
self.parameters['CPUConsumed'].append( result['Value'] )
hmsCPU = result['Value']
rawCPU = self.__convertCPUTime( hmsCPU )
if rawCPU['OK']:
heartBeatDict['CPUConsumed'] = rawCPU['Value']
result = self.__getWallClockTime()
msg += 'WallClock: %.2f s ' % ( result['Value'] )
self.parameters['WallClockTime'].append( result['Value'] )
heartBeatDict['WallClockTime'] = result['Value']
self.log.info( msg )
result = self.__checkProgress()
if not result['OK']:
self.checkError = result['Message']
self.log.warn( self.checkError )
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len( outputList )
self.log.info( 'Last %s lines of available application output:' % ( size ) )
self.log.info( '================START================' )
for line in outputList:
self.log.info( line )
self.log.info( '=================END=================' )
self.__killRunningThread()
return S_OK()
recentStdOut = 'None'
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len( outputList )
recentStdOut = 'Last %s lines of application output from Watchdog on %s [UTC]:' % ( size, Time.dateTime() )
border = '=' * len( recentStdOut )
cpuTotal = 'Last reported CPU consumed for job is %s (h:m:s)' % ( hmsCPU )
if self.timeLeft:
cpuTotal += ', Batch Queue Time Left %s (s @ HS06)' % self.timeLeft
recentStdOut = '\n%s\n%s\n%s\n%s\n' % ( border, recentStdOut, cpuTotal, border )
self.log.info( recentStdOut )
for line in outputList:
self.log.info( line )
recentStdOut += line + '\n'
else:
recentStdOut = 'Watchdog is initializing and will attempt to obtain standard output from application thread'
self.log.info( recentStdOut )
self.peekFailCount += 1
if self.peekFailCount > self.peekRetry:
self.jobPeekFlag = 0
self.log.warn( 'Turning off job peeking for remainder of execution' )
if not os.environ.has_key( 'JOBID' ):
self.log.info( 'Running without JOBID so parameters will not be reported' )
return S_OK()
jobID = os.environ['JOBID']
staticParamDict = {'StandardOutput':recentStdOut}
self.__sendSignOfLife( int( jobID ), heartBeatDict, staticParamDict )
return S_OK( 'Watchdog checking cycle complete' )
#############################################################################
def __getCPU( self ):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
cpuTime = '00:00:00'
try:
cpuTime = self.processMonitor.getCPUConsumed( self.wrapperPID )
except Exception:
self.log.warn( 'Could not determine CPU time consumed with exception' )
self.log.exception()
return S_OK( cpuTime ) # just return null CPU
if not cpuTime['OK']:
self.log.warn( 'Problem while checking consumed CPU' )
self.log.warn( cpuTime )
return S_OK( '00:00:00' ) # again return null CPU in this case
cpuTime = cpuTime['Value']
self.log.verbose( "Raw CPU time consumed (s) = %s" % ( cpuTime ) )
result = self.__getCPUHMS( cpuTime )
return result
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( humanTime )
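  # Worked example (value assumed): for cpuTime = 3725 seconds,
  # divmod( 3725, 60 ) -> ( 62, 5 ) and divmod( 62, 60 ) -> ( 1, 2 ),
  # so the returned human readable string is '01:02:05'.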
#############################################################################
def __interpretControlSignal( self, signalDict ):
"""This method is called whenever a signal is sent via the result of
sending a sign of life.
"""
self.log.info( 'Received control signal' )
    if isinstance( signalDict, dict ):
if signalDict.has_key( 'Kill' ):
self.log.info( 'Received Kill signal, stopping job via control signal' )
self.checkError = 'Received Kill signal'
self.__killRunningThread()
else:
self.log.info( 'The following control signal was sent but not understood by the watchdog:' )
self.log.info( signalDict )
else:
self.log.info( 'Expected dictionary for control signal, received:\n%s' % ( signalDict ) )
return S_OK()
#############################################################################
def __checkProgress( self ):
"""This method calls specific tests to determine whether the job execution
is proceeding normally. CS flags can easily be added to add or remove
tests via central configuration.
"""
report = ''
if self.testWallClock:
result = self.__checkWallClockTime()
report += 'WallClock: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'WallClock: NA,'
if self.testDiskSpace:
result = self.__checkDiskSpace()
report += 'DiskSpace: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'DiskSpace: NA,'
if self.testLoadAvg:
result = self.__checkLoadAverage()
report += 'LoadAverage: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'LoadAverage: NA,'
if self.testCPUConsumed:
result = self.__checkCPUConsumed()
report += 'CPUConsumed: OK, '
if not result['OK']:
return result
else:
report += 'CPUConsumed: NA, '
if self.testCPULimit:
result = self.__checkCPULimit()
report += 'CPULimit OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'CPULimit: NA, '
if self.testTimeLeft:
self.__timeLeft()
if self.timeLeft:
report += 'TimeLeft: OK'
else:
report += 'TimeLeft: NA'
if self.testMemoryLimit:
result = self.__checkMemoryLimit()
report += 'MemoryLimit OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'MemoryLimit: NA, '
self.log.info( report )
return S_OK( 'All enabled checks passed' )
#############################################################################
def __checkCPUConsumed( self ):
""" Checks whether the CPU consumed by application process is reasonable. This
method will report stalled jobs to be killed.
"""
self.log.info( "Checking CPU Consumed" )
if 'WallClockTime' not in self.parameters:
return S_ERROR( 'Missing WallClockTime info' )
if 'CPUConsumed' not in self.parameters:
return S_ERROR( 'Missing CPUConsumed info' )
wallClockTime = self.parameters['WallClockTime'][-1]
if wallClockTime < self.sampleCPUTime:
self.log.info( "Stopping check, wallclock time (%s) is still smalled than sample time (%s)" % ( wallClockTime,
self.sampleCPUTime ) )
return S_OK()
intervals = max( 1, int( self.sampleCPUTime / self.checkingTime ) )
if len( self.parameters['CPUConsumed'] ) < intervals + 1:
self.log.info( "Not enough snapshots to calculate, there are %s and we need %s" % ( len( self.parameters['CPUConsumed'] ),
intervals + 1 ) )
return S_OK()
wallClockTime = self.parameters['WallClockTime'][-1] - self.parameters['WallClockTime'][-1 - intervals ]
try:
cpuTime = self.__convertCPUTime( self.parameters['CPUConsumed'][-1] )['Value']
# For some reason, some times the CPU consumed estimation returns 0
# if cpuTime == 0:
# return S_OK()
cpuTime -= self.__convertCPUTime( self.parameters['CPUConsumed'][-1 - intervals ] )['Value']
if cpuTime < 0:
self.log.warn( 'Consumed CPU time negative, something wrong may have happened, ignore' )
return S_OK()
if wallClockTime <= 0:
self.log.warn( 'Wallclock time should not be negative or zero, Ignore' )
return S_OK()
ratio = ( cpuTime / wallClockTime ) * 100.
self.log.info( "CPU/Wallclock ratio is %.2f%%" % ratio )
# in case of error cpuTime might be 0, exclude this
if ratio < self.minCPUWallClockRatio:
if os.path.exists( 'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK' ):
self.log.info( 'N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload' )
return S_OK()
self.log.info( "Job is stalled!" )
return S_ERROR( 'Watchdog identified this job as stalled' )
except Exception, e:
self.log.error( "Cannot convert CPU consumed from string to int", str( e ) )
return S_OK()
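  # Worked example (default CS values assumed): with sampleCPUTime = 1800 and
  # checkingTime = 1800, intervals = 1, so the check compares the last two
  # snapshots. If the job consumed 30 s of CPU over 1800 s of wallclock,
  # ratio = ( 30 / 1800. ) * 100 = 1.67%, which is below the default
  # minCPUWallClockRatio of 5% and the job is declared stalled.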
#############################################################################
def __convertCPUTime( self, cputime ):
""" Method to convert the CPU time as returned from the Watchdog
instances to the equivalent DIRAC normalized CPU time to be compared
to the Job CPU requirement.
"""
cpuValue = 0
cpuHMS = cputime.split( ':' )
# for i in xrange( len( cpuHMS ) ):
# cpuHMS[i] = cpuHMS[i].replace( '00', '0' )
try:
hours = float( cpuHMS[0] ) * 60 * 60
mins = float( cpuHMS[1] ) * 60
secs = float( cpuHMS[2] )
cpuValue = float( hours + mins + secs )
except Exception, x:
self.log.warn( str( x ) )
return S_ERROR( 'Could not calculate CPU time' )
# Normalization to be implemented
normalizedCPUValue = cpuValue
result = S_OK()
result['Value'] = normalizedCPUValue
self.log.debug( 'CPU value %s converted to %s' % ( cputime, normalizedCPUValue ) )
return result
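  # Worked example: __convertCPUTime( '01:02:05' ) splits on ':' and returns
  # S_OK with Value = 1 * 3600 + 2 * 60 + 5 = 3725.0 seconds, the inverse of
  # __getCPUHMS() above (normalization is not yet applied).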
#############################################################################
def __checkCPULimit( self ):
""" Checks that the job has consumed more than the job CPU requirement
(plus a configurable margin) and kills them as necessary.
"""
consumedCPU = 0
if self.parameters.has_key( 'CPUConsumed' ):
consumedCPU = self.parameters['CPUConsumed'][-1]
consumedCPUDict = self.__convertCPUTime( consumedCPU )
if consumedCPUDict['OK']:
currentCPU = consumedCPUDict['Value']
else:
return S_OK( 'Not possible to determine current CPU consumed' )
if consumedCPU:
      limit = self.jobCPUtime + self.jobCPUtime * ( self.jobCPUMargin / 100. )  # float division: integer division would silently drop the margin under Python 2
cpuConsumed = float( currentCPU )
if cpuConsumed > limit:
self.log.info( 'Job has consumed more than the specified CPU limit with an additional %s%% margin' % ( self.jobCPUMargin ) )
return S_ERROR( 'Job has exceeded maximum CPU time limit' )
else:
return S_OK( 'Job within CPU limit' )
elif not currentCPU:
self.log.verbose( 'Both initial and current CPU consumed are null' )
return S_OK( 'CPU consumed is not measurable yet' )
else:
return S_OK( 'Not possible to determine CPU consumed' )
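  # Worked example (values assumed): with jobCPUtime = 1000 s and the default
  # jobCPUMargin of 20%, limit = 1000 + 1000 * 0.2 = 1200 s, so the job is
  # failed once its consumed CPU exceeds 1200 s.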
def __checkMemoryLimit( self ):
""" Checks that the job memory consumption is within a limit
"""
if self.parameters.has_key( 'Vsize' ):
vsize = self.parameters['Vsize'][-1]
if vsize and self.memoryLimit:
if vsize > self.memoryLimit:
          # Just a warning for the moment
          self.log.warn( "Job has consumed %.2f KB of memory with the limit of %.2f KB" % ( vsize, self.memoryLimit ) )
return S_OK()
#############################################################################
def __checkDiskSpace( self ):
"""Checks whether the CS defined minimum disk space is available.
"""
if self.parameters.has_key( 'DiskSpace' ):
availSpace = self.parameters['DiskSpace'][-1]
if availSpace >= 0 and availSpace < self.minDiskSpace:
self.log.info( 'Not enough local disk space for job to continue, defined in CS as %s MB' % ( self.minDiskSpace ) )
return S_ERROR( 'Job has insufficient disk space to continue' )
else:
return S_OK( 'Job has enough disk space available' )
else:
return S_ERROR( 'Available disk space could not be established' )
#############################################################################
def __checkWallClockTime( self ):
"""Checks whether the job has been running for the CS defined maximum
wall clock time.
"""
if self.initialValues.has_key( 'StartTime' ):
startTime = self.initialValues['StartTime']
if time.time() - startTime > self.maxWallClockTime:
self.log.info( 'Job has exceeded maximum wall clock time of %s seconds' % ( self.maxWallClockTime ) )
return S_ERROR( 'Job has exceeded maximum wall clock time' )
else:
return S_OK( 'Job within maximum wall clock time' )
else:
return S_ERROR( 'Job start time could not be established' )
#############################################################################
def __checkLoadAverage( self ):
"""Checks whether the CS defined maximum load average is exceeded.
"""
if self.parameters.has_key( 'LoadAverage' ):
loadAvg = self.parameters['LoadAverage'][-1]
if loadAvg > float( self.loadAvgLimit ):
self.log.info( 'Maximum load average exceeded, defined in CS as %s ' % ( self.loadAvgLimit ) )
return S_ERROR( 'Job exceeded maximum load average' )
else:
return S_OK( 'Job running with normal load average' )
else:
return S_ERROR( 'Job load average not established' )
#############################################################################
def __peek( self ):
""" Uses ExecutionThread.getOutput() method to obtain standard output
from running thread via subprocess callback function.
"""
result = self.exeThread.getOutput()
if not result['OK']:
self.log.warn( 'Could not obtain output from running application thread' )
self.log.warn( result['Message'] )
return result
#############################################################################
def calibrate( self ):
""" The calibrate method obtains the initial values for system memory and load
and calculates the margin for error for the rest of the Watchdog cycle.
"""
self.__getWallClockTime()
self.parameters['WallClockTime'] = []
initialCPU = 0.0
result = self.__getCPU()
self.log.verbose( 'CPU consumed %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish CPU consumed'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
initialCPU = result['Value']
self.initialValues['CPUConsumed'] = initialCPU
self.parameters['CPUConsumed'] = []
result = self.getLoadAverage()
self.log.verbose( 'LoadAverage: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish LoadAverage'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['LoadAverage'] = result['Value']
self.parameters['LoadAverage'] = []
result = self.getMemoryUsed()
self.log.verbose( 'MemUsed: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish MemoryUsed'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['MemoryUsed'] = result['Value']
self.parameters['MemoryUsed'] = []
    result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
    if not result['OK']:
      self.log.warn( 'Could not get job memory usage' )
    else:
      self.log.verbose( 'Job Memory: %s' % ( result['Value'] ) )
      self.initialValues['Vsize'] = result['Value']['Vsize'] / 1024.
      self.initialValues['RSS'] = result['Value']['RSS'] / 1024.
self.parameters['Vsize'] = []
self.parameters['RSS'] = []
    result = self.getDiskSpace()
self.log.verbose( 'DiskSpace: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish DiskSpace'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['DiskSpace'] = result['Value']
self.parameters['DiskSpace'] = []
result = self.getNodeInformation()
self.log.verbose( 'NodeInfo: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish static system information'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
if os.environ.has_key( 'LSB_JOBID' ):
result['LocalJobID'] = os.environ['LSB_JOBID']
if os.environ.has_key( 'PBS_JOBID' ):
result['LocalJobID'] = os.environ['PBS_JOBID']
if os.environ.has_key( 'QSUB_REQNAME' ):
result['LocalJobID'] = os.environ['QSUB_REQNAME']
if os.environ.has_key( 'JOB_ID' ):
result['LocalJobID'] = os.environ['JOB_ID']
self.__reportParameters( result, 'NodeInformation', True )
self.__reportParameters( self.initialValues, 'InitialValues' )
return S_OK()
def __timeLeft( self ):
"""
return Normalized CPU time left in the batch system
0 if not available
update self.timeLeft and self.littleTimeLeft
"""
# Get CPU time left in the batch system
result = self.timeLeftUtil.getTimeLeft( 0.0 )
if not result['OK']:
# Could not get CPU time left, we might need to wait for the first loop
# or the Utility is not working properly for this batch system
      # or we are not running in a batch system at all
timeLeft = 0
else:
timeLeft = result['Value']
self.timeLeft = timeLeft
if not self.littleTimeLeft:
if timeLeft and timeLeft < self.grossTimeLeftLimit:
        self.log.info( 'TimeLeft %s is below the gross limit %s, now checking with higher frequency' % ( timeLeft, self.grossTimeLeftLimit ) )
self.littleTimeLeft = True
# TODO: better configurable way of doing this to be coded
self.littleTimeLeftCount = 15
else:
if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
timeLeft = -1
return timeLeft
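  # Worked example (default values assumed): with checkingTime = 1800 s the
  # gross limit is 10 * 1800 = 18000 s; once the batch system reports less
  # than that, littleTimeLeft is set and 15 polling loops are allowed. With
  # pollingTime = 10 s the fine limit defaults to 150 * 10 = 1500 s; below
  # that this method returns -1 and the caller kills the job.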
#############################################################################
def __getUsageSummary( self ):
""" Returns average load, memory etc. over execution of job thread
"""
summary = {}
# CPUConsumed
if self.parameters.has_key( 'CPUConsumed' ):
cpuList = self.parameters['CPUConsumed']
if cpuList:
hmsCPU = cpuList[-1]
rawCPU = self.__convertCPUTime( hmsCPU )
if rawCPU['OK']:
summary['LastUpdateCPU(s)'] = rawCPU['Value']
else:
summary['LastUpdateCPU(s)'] = 'Could not be estimated'
# DiskSpace
if self.parameters.has_key( 'DiskSpace' ):
space = self.parameters['DiskSpace']
if space:
        # abs() already guarantees a non-negative value
        value = abs( float( space[-1] ) - float( self.initialValues['DiskSpace'] ) )
summary['DiskSpace(MB)'] = value
else:
summary['DiskSpace(MB)'] = 'Could not be estimated'
# MemoryUsed
if self.parameters.has_key( 'MemoryUsed' ):
memory = self.parameters['MemoryUsed']
if memory:
summary['MemoryUsed(kb)'] = abs( float( memory[-1] ) - float( self.initialValues['MemoryUsed'] ) )
else:
summary['MemoryUsed(kb)'] = 'Could not be estimated'
# LoadAverage
if self.parameters.has_key( 'LoadAverage' ):
laList = self.parameters['LoadAverage']
if laList:
summary['LoadAverage'] = float( sum( laList ) ) / float( len( laList ) )
else:
summary['LoadAverage'] = 'Could not be estimated'
result = self.__getWallClockTime()
wallClock = result['Value']
summary['WallClockTime(s)'] = wallClock
self.__reportParameters( summary, 'UsageSummary', True )
self.currentStats = summary
#############################################################################
def __reportParameters( self, params, title = None, report = False ):
"""Will report parameters for job.
"""
try:
parameters = []
self.log.info( '==========================================================' )
if title:
self.log.info( 'Watchdog will report %s' % ( title ) )
else:
self.log.info( 'Watchdog will report parameters' )
self.log.info( '==========================================================' )
vals = params
if params.has_key( 'Value' ):
if vals['Value']:
vals = params['Value']
for k, v in vals.items():
if v:
self.log.info( str( k ) + ' = ' + str( v ) )
parameters.append( ( k, v ) )
if report:
self.__setJobParamList( parameters )
self.log.info( '==========================================================' )
except Exception, x:
self.log.warn( 'Problem while reporting parameters' )
self.log.warn( str( x ) )
#############################################################################
def __getWallClockTime( self ):
""" Establishes the Wall Clock time spent since the Watchdog initialization"""
result = S_OK()
if self.initialValues.has_key( 'StartTime' ):
currentTime = time.time()
wallClock = currentTime - self.initialValues['StartTime']
result['Value'] = wallClock
else:
self.initialValues['StartTime'] = time.time()
result['Value'] = 0.0
return result
#############################################################################
def __killRunningThread( self ):
""" Will kill the running thread process and any child processes."""
self.log.info( 'Sending kill signal to application PID %s' % ( self.spObject.getChildPID() ) )
result = self.spObject.killChild()
self.applicationKilled = True
self.log.info( 'Subprocess.killChild() returned:%s ' % ( result ) )
return S_OK( 'Thread killed' )
#############################################################################
def __sendSignOfLife( self, jobID, heartBeatDict, staticParamDict ):
""" Sends sign of life 'heartbeat' signal and triggers control signal
interpretation.
"""
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
result = jobReport.sendHeartBeat( jobID, heartBeatDict, staticParamDict )
if not result['OK']:
self.log.warn( 'Problem sending sign of life' )
self.log.warn( result )
if result['OK'] and result['Value']:
self.__interpretControlSignal( result['Value'] )
return result
#############################################################################
def __setJobParamList( self, value ):
"""Wraps around setJobParameters of state update client
"""
# job wrapper template sets the jobID variable
if not os.environ.has_key( 'JOBID' ):
self.log.info( 'Running without JOBID so parameters will not be reported' )
return S_OK()
jobID = os.environ['JOBID']
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
jobParam = jobReport.setJobParameters( int( jobID ), value )
self.log.verbose( 'setJobParameters(%s,%s)' % ( jobID, value ) )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
return jobParam
#############################################################################
def getNodeInformation( self ):
""" Attempts to retrieve all static system information, should be overridden in a subclass"""
methodName = 'getNodeInformation'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getLoadAverage( self ):
""" Attempts to get the load average, should be overridden in a subclass"""
methodName = 'getLoadAverage'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getMemoryUsed( self ):
""" Attempts to get the memory used, should be overridden in a subclass"""
methodName = 'getMemoryUsed'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getDiskSpace( self ):
""" Attempts to get the available disk space, should be overridden in a subclass"""
methodName = 'getDiskSpace'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
calancha/DIRAC
|
WorkloadManagementSystem/JobWrapper/Watchdog.py
|
Python
|
gpl-3.0
| 34,976
|
[
"DIRAC"
] |
b9044fa64eb879a23ebb598f335776f47708b89b93e9a84e81a51370e8396385
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from ccx_keys.locator import CCXLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard, get_display_category
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from labster_course_license.user_utils import get_user_region
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
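# A small usage sketch (hypothetical URL; unique_id_for_user is imported from
# student.models and is sha1-based, per the docstring above):
#
#     >>> process_survey_link("http://example.com/survey?uid={UNIQUE_ID}", user)
#     'http://example.com/survey?uid=<sha1 of user.username>'
#
# Links without the {UNIQUE_ID} placeholder are returned unchanged.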
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): If not None, ONLY courses of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# Filter out anything that is not attributed to the current ORG.
if org_to_include and course_overview.location.org != org_to_include:
continue
        # Otherwise, filter out any enrollments whose courses belong to an excluded org.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
CertificateStatuses.audit_passing: 'auditing',
CertificateStatuses.audit_notpassing: 'auditing',
CertificateStatuses.unverified: 'unverified',
}
default_status = 'processing'
default_info = {
'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in White Labels
if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
    Compute some more information from the given course modes
    and the user's current enrollment.
    Returns the following information:
    - whether to show the course upsell information
    - number of days until the user can no longer upsell
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
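# Worked example (dates assumed): if the course has a verified mode with an
# expiration_datetime of 2016-01-11 and today is 2016-01-01, an enrollment in
# an upsell-able mode yields mode_info = {'show_upsell': True,
# 'days_for_upsell': 10, 'verified_sku': ..., 'verified_bulk_sku': ...}.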
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": course_key.to_deprecated_string()},
page='dashboard',
)
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
# we want to filter and only show enrollments for courses within
# the 'ORG' defined in configuration.
course_org_filter = configuration_helpers.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a configuration
org_filter_out_set = configuration_helpers.get_all_orgs()
# remove our current org from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Get any programs associated with courses being displayed.
# This is passed along in the template context to allow rendering of
# program-related information on the dashboard.
course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
BulkEmailFlag.feature_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse('logout'),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'course_programs': course_programs,
'disable_courseware_js': True,
'show_program_listing': ProgramsApiConfig.current().show_program_listing,
}
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
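# Illustrative sketch (not part of the original flow; values are hypothetical):
# the "recent" window is driven entirely by
# DashboardConfiguration.recent_enrollment_time_delta, so a configuration of
# 600 seconds keeps only active enrollments created in the last ten minutes:
#
#     >>> DashboardConfiguration.objects.create(enabled=True, recent_enrollment_time_delta=600)
#     >>> _get_recently_enrolled_courses(list(CourseEnrollment.enrollments_for_user(user)))
#     [<CourseEnrollment ...>]  # only enrollments newer than ten minutes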
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
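# Illustrative sketch with hypothetical data: a donation is requested only when
# the DonationConfiguration flag is on and the user's current mode is free for
# that course (min_price == 0):
#
#     >>> course_modes = {course_id: {'audit': audit_mode_with_min_price_0}}
#     >>> _allow_donation(course_modes, course_id, enrollment_in_audit_mode)
#     True  # assuming DonationConfiguration.current().enabled is True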
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
providers_names = get_credit_provider_display_names(course_key)
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
            The default is True. Set it to False only for legacy code or
            code with non-standard flows (e.g. beta tester invitations);
            for any standard enrollment flow you want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
        # We don't do this check on unenroll, since otherwise a course with a bad id could never be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
enrollment = CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
enrollment.send_signal(EnrollStatusChange.enroll)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
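# Illustrative client call (an assumption, not part of this module): the
# dashboard posts to this view via AJAX. With Django's test client, and
# assuming the URL is named 'change_enrollment':
#
#     >>> client.post(reverse('change_enrollment'),
#     ...             {'course_id': 'edX/DemoX/Demo_Course', 'enrollment_action': 'enroll'})
#     <HttpResponse status=200>  # or a "choose your track" URL body for paid tracks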
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u"Login failed - user with username {username} has no social auth "
"with backend_name {backend_name}".format(
username=username, backend_name=backend_name)
)
message = _(
"You've successfully logged into your {provider_name} account, "
"but this account isn't linked with an {platform_name} account yet."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page."
).format(
platform_name=platform_name
)
return HttpResponse(message, content_type="text/plain", status=403)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
# TODO: User error message
"value": _('There was an error receiving your login information. Please email us.'),
}) # TODO: this should be status code 400
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
        # Check if the user has a linked shibboleth account; if so, redirect the user to shib-login.
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
                # This is actually the common case: logging in a user without an external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
lockout_message = _('This account has been temporarily locked due '
'to excessive login failures. Try again later.')
return JsonResponse({
"success": False,
"value": lockout_message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
msg = None
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
# Start: Added by Labster
else:
            # If there was a login error, check whether a user with the given email exists in one of the regions.
            # If so, suggest that the student log in to the appropriate region server.
regions = configuration_helpers.get_value('REGIONS', settings.REGIONS)
region = get_user_region(request, regions, email)
if region:
msg = _(
'It appears that your account is located on our {region_code} server. '
'Please sign in <a href="{login_url}">here</a> instead.'
).format(
region_code=region['region_code'],
login_url=region['login_url'],
)
elif settings.LABSTER_FEATURES.get('ENABLE_REGION_IPADDR_WARNING'):
current_region = request.session.get('country_code')
region = regions.get(current_region)
            # If ENABLE_REGION_IPADDR_WARNING is enabled and the user does not have an
            # account in Central or in any of the regions, suggest that the student
            # register on the appropriate region server based on the user's IP address.
if region:
msg = _(
'It appears that you are based in the {region_code}. '
'Please create an account <a href="{register_url}">here</a>.'
).format(
region_code=region['region_code'],
register_url=region['register_url'],
)
msg = msg or _("Account doesn't exist.")
# End: Added by Labster
msg = msg or _('Email or password is incorrect.')
return JsonResponse({
"success": False,
"value": msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(
user.id,
{
'email': email,
'username': username
},
{
# Disable MailChimp because we don't want to update the user's email
# and username in MailChimp on every page load. We only need to capture
# this data on registration/activation.
'MailChimp': False
}
)
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
"email message with instructions for activating your account.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
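# Illustrative usage (mirroring how _do_create_account below raises it): callers
# pass the name of the offending form field so the JSON error response can
# point at the right input:
#
#     raise AccountValidationError(
#         _("An account with the Email '{email}' already exists.").format(email=email),
#         field="email",
#     )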
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
    Handler that saves the user's signup source
    when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = configuration_helpers.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
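# Illustrative sketch: because the handler checks kwargs['created'], only the
# initial save of a new User records a signup source; later user.save() calls
# do not. With a hypothetical SITE_NAME of 'example.edu':
#
#     >>> User.objects.create(username='jane', email='jane@example.com')
#     # -> UserSignupSource(user=<jane>, site='example.edu') is saved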
def _do_create_account(form, custom_form=None):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
errors = {}
errors.update(form.errors)
if custom_form:
errors.update(custom_form.errors)
if errors:
raise ValidationError(errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
if custom_form:
custom_model = custom_form.save(commit=False)
custom_model.user = user
custom_model.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
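# Illustrative usage with hypothetical form data (this mirrors how auto_auth
# below builds its AccountCreationForm):
#
#     >>> form = AccountCreationForm(
#     ...     data={'username': 'jane', 'email': 'jane@example.com',
#     ...           'password': 'secret', 'name': 'Jane Doe'},
#     ...     tos_required=False,
#     ... )
#     >>> user, profile, registration = _do_create_account(form)
#     >>> user.is_active
#     False  # the account stays inactive until the activation link is clicked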
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
    # allow defining a custom set of required/optional/hidden fields via configuration
extra_fields = configuration_helpers.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Start: Added by Labster
    # A user can create an account only in the appropriate region, or if the
    # user is already enrolled in a course on this region's server.
email = params["email"]
email_enrolled = CourseEnrollmentAllowed.objects.filter(email=email).exists()
if not email_enrolled:
regions = configuration_helpers.get_value('REGIONS', settings.REGIONS)
region = get_user_region(request, regions, email, enrollment=True)
# Check if user already enrolled in other regions.
if region:
msg = _(
'It appears that you are already enrolled in a course hosted on our {region_code} server. '
'Please create an account <a href="{register_url}">here</a>.'
).format(
region_code=region['region_code'],
register_url=region['register_url'],
)
raise ValidationError({'error': [msg]})
        # Restrict user registration if `ALLOW_OTHER_REGION_TO_REGISTER` is
        # disabled and the user's region is covered by Labster.
if not settings.LABSTER_FEATURES.get('ALLOW_OTHER_REGION_TO_REGISTER'):
current_region = request.session.get('country_code')
region = regions.get(current_region)
if region:
msg = _(
'Registration of users from {region_name} is not allowed on this server.'
).format(region_name=region['name'], )
raise ValidationError({'error': [msg]})
# End: Added by Labster
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
tos_required = (
registration_fields.get('terms_of_service') != 'hidden' or
registration_fields.get('honor_code') != 'hidden'
) and (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, custom_form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
                # MailChimp requires the age & yearOfBirth to be integers; we send a sane integer default if falsey.
'age': profile.age or -1,
'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
# Announce registration
REGISTER_USER.send(sender=None, user=user, profile=profile)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
_enroll_user_in_pending_courses(user) # Enroll student in any pending courses
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
_record_registration_attribution(request, new_user)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
def _enroll_user_in_pending_courses(student):
"""
Enroll student in any pending courses he/she may have.
"""
ceas = CourseEnrollmentAllowed.objects.filter(email=student.email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
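# Illustrative sketch (hypothetical course key): a "pending course" is a
# CourseEnrollmentAllowed row keyed by email with auto_enroll=True, typically
# created when an instructor invites an address that has no account yet:
#
#     >>> CourseEnrollmentAllowed.objects.create(
#     ...     email='jane@example.com', course_id=course_key, auto_enroll=True)
#     >>> _enroll_user_in_pending_courses(jane)  # jane is now enrolled in course_key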
def _record_registration_attribution(request, user):
"""
Attribute this user's registration to the referring affiliate, if
applicable.
"""
affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
if user is not None and affiliate_id is not None:
UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
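# Illustrative sketch (hypothetical cookie value): attribution happens only when
# the affiliate cookie is present on the registration request:
#
#     >>> request.COOKIES[settings.AFFILIATE_COOKIE_NAME] = 'partner-123'
#     >>> _record_registration_attribution(request, user)
#     # -> a UserAttribute(REGISTRATION_AFFILIATE_ID, 'partner-123') row is stored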
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
    * `redirect`: Set to "true" to redirect to the `redirect_to` value if set, or
      to the course home page if course_id is defined; otherwise redirects to the dashboard
    * `redirect_to`: the url to redirect to
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
ccx_id = request.GET.get('ccx_id', None)
redirect_to = request.GET.get('redirect_to', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
login_when_done = 'no_login' not in request.GET
ccx_key = None
if ccx_id:
ccx_key = CCXLocator.from_course_locator(course_key, ccx_id)
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except (AccountValidationError, ValidationError):
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Enroll the user in a ccx
if ccx_key is not None:
CourseEnrollment.enroll(user, ccx_key)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to specific page if specified
if redirect_to:
redirect_url = redirect_to
# Redirect to course info page if course_id is known
elif course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
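# Illustrative request (all values hypothetical; requires
# settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] to be enabled):
#
#     GET /auto_auth?username=jane&password=secret&staff=true
#         &course_id=edX/DemoX/Demo_Course&redirect=true
#
# creates or updates the user, activates and enrolls her, logs her in, and
# redirects to the course info page.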
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
_enroll_user_in_pending_courses(regs[0].user)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request)
        # When password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def uidb36_to_uidb64(uidb36):
"""
Needed to support old password reset URLs that use base36-encoded user IDs
https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
Args:
uidb36: base36-encoded user ID
    Returns: the base64-encoded user ID, or a dummy, invalid ID if the conversion fails
"""
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
return uidb64
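# Illustrative conversion (a sketch; the exact output depends on the Django
# utilities imported above): 'abc' in base36 is 13368, and the helper base64-
# encodes the decimal string of that integer:
#
#     >>> uidb36_to_uidb64('abc')
#     'MTMzNjg'
#     >>> uidb36_to_uidb64('not*valid')  # invalid input falls back to a dummy ID
#     '1'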
def validate_password(user, password):
"""
Tie in password policy enforcement as an optional level of
security protection
Args:
user: the user object whose password we're checking.
password: the user's proposed new password.
Returns:
is_valid_password: a boolean indicating if the new password
passes the validation.
err_msg: an error message if there's a violation of one of the password
checks. Otherwise, `None`.
"""
err_msg = None
# check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
    # Also, check to see if passwords are getting reset too frequently.
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
        # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
is_password_valid = err_msg is None
return is_password_valid, err_msg
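# Illustrative usage (hypothetical user and password; both checks are driven by
# settings.ADVANCED_SECURITY_CONFIG and the PasswordHistory model):
#
#     >>> is_valid, err_msg = validate_password(user, 'previously-used-secret')
#     >>> is_valid, err_msg
#     (False, u'You are re-using a password that you have used recently. ...')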
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
extra_context = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
if request.method == 'POST':
password = request.POST['new_password1']
is_password_valid, password_err_msg = validate_password(user, password)
if not is_password_valid:
# We have a password reset attempt which violates some security
# policy. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': password_err_msg,
}
context.update(extra_context)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
        # This validation was moved out of `validate_password` so the error message is shown in the view alongside the form.
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_strength(password)
except ValidationError as err:
extra_context['err_msg'] = _('Password: ') + '; '.join(err.messages)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
        # If the password hash changed, record the change in the PasswordHistory.
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
def reactivation_email_for_user(user):
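    """
    Re-send the account activation email to an inactive user.

    Returns a JsonResponse indicating success, or an error response if the user
    has no Registration record or the email cannot be sent.
    """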
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL,
))
except Exception: # pylint: disable=broad-except
log.error(
u'Unable to send reactivation email from "%s"',
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
exc_info=True
)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
    Given a new email for a user, do some basic verification of the new address. If any issues are
    encountered during verification, a ValueError is raised.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
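# Illustrative behavior (hypothetical addresses):
#
#     >>> validate_new_email(user, 'not-an-email')       # raises ValueError('Valid e-mail address required.')
#     >>> validate_new_email(user, user.email)           # raises ValueError('Old email is the same as the new email.')
#     >>> validate_new_email(user, 'fresh@example.com')  # passes silently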
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be raised.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
    # if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
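# Illustrative flow (hypothetical address): a successful call leaves exactly one
# PendingEmailChange row for the user and mails the activation key to the *new*
# address; confirm_email_change() below completes the swap:
#
#     >>> do_email_change_request(user, 'new@example.com')
#     >>> PendingEmailChange.objects.get(user=user).new_email
#     u'new@example.com'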
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the user's address.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses): # pylint: disable=invalid-name
"""Build a dictionary of program data required for display on the student dashboard.
Given a user and an iterable of course keys, find all programs relevant to the
user and return them in a dictionary keyed by course key.
Arguments:
user (User): The user to authenticate as when requesting programs.
user_enrolled_courses (list): List of course keys representing the courses in which
the given user has active enrollments.
Returns:
dict, containing programs keyed by course.
"""
course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
programs_data = {}
for course_key, programs in course_programs.viewitems():
for program in programs:
if program.get('status') == 'active' and program.get('category') == 'xseries':
try:
programs_for_course = programs_data.setdefault(course_key, {})
programs_for_course.setdefault('course_program_list', []).append({
'course_count': len(program['course_codes']),
'display_name': program['name'],
'program_id': program['id'],
'program_marketing_url': urljoin(
settings.MKTG_URLS.get('ROOT'),
'xseries/{}'
).format(program['marketing_slug'])
})
programs_for_course['category'] = program.get('category')
programs_for_course['display_category'] = get_display_category(program)
except KeyError:
log.warning('Program structure is invalid, skipping display: %r', program)
return programs_data
class LogoutView(TemplateView):
"""
Logs out user and redirects.
The template should load iframes to log the user out of OpenID Connect services.
See http://openid.net/specs/openid-connect-logout-1_0.html.
"""
oauth_client_ids = []
template_name = 'logout.html'
# Keep track of the page to which the user should ultimately be redirected.
target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
def dispatch(self, request, *args, **kwargs): # pylint: disable=missing-docstring
# We do not log here, because we have a handler registered to perform logging on successful logouts.
request.is_from_logout = True
# Get the list of authorized clients before we clear the session.
self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])
logout(request)
# If we don't need to deal with OIDC logouts, just redirect the user.
if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
response = super(LogoutView, self).dispatch(request, *args, **kwargs)
else:
response = redirect(self.target)
# Clear the cookie used by the edx.org marketing site
delete_logged_in_cookies(response)
return response
def _build_logout_url(self, url):
"""
Builds a logout URL with the `no_redirect` query string parameter.
Args:
url (str): IDA logout URL
Returns:
str
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params['no_redirect'] = 1
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
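# Worked example (values are illustrative): given
#     url = 'https://ida.example.com/logout?next=%2F'
# parse_qs yields {'next': ['/']}, and after adding no_redirect the rebuilt
# URL carries both next=%2F and no_redirect=1 (query order may vary).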
def get_context_data(self, **kwargs):
context = super(LogoutView, self).get_context_data(**kwargs)
# Create a list of URIs that must be called to log the user out of all of the IDAs.
uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
logout_uri__isnull=False).values_list('logout_uri', flat=True)
referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
logout_uris = []
for uri in uris:
if not referrer or not uri.startswith(referrer):
logout_uris.append(self._build_logout_url(uri))
context.update({
'target': self.target,
'logout_uris': logout_uris,
})
return context
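# e.g. (illustrative): with two authorized clients whose logout URIs are
# https://ida1.example.com/logout and https://ida2.example.com/logout, and a
# non-matching referrer, context['logout_uris'] holds both URIs with
# no_redirect=1 appended, and the template loads each one in an iframe.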
|
Livit/Livit.Learn.EdX
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 109,663
|
[
"VisIt"
] |
9e878bad9e84cf504cd9c601736d778215cc75b0bc4f81e11815c4da42d325fc
|
"""
Subpackage ``LLSG`` contains the first version of our Local Low-rank plus Sparse
plus Gaussian-noise decomposition (Gomez Gonzalez et al. 2016) for ADI data.
"""
from __future__ import absolute_import
from .llsg import *
from .thresholding import *
|
henry-ngo/VIP
|
vip_hci/llsg/__init__.py
|
Python
|
mit
| 253
|
[
"Gaussian"
] |
d7f1b3caf0042771708f047d2eef3122cdc10733a03e619df907346f3ca9cf5f
|
########################################################################
# File : ResourcesDefaults.py
# Author : Ricardo Graciani
########################################################################
"""
Some helper functions to access Default options for Different Resources (CEs, SEs, Catalogs, ...)
"""
from __future__ import print_function
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgResourceSection, cfgPath, cfgInstallPath, cfgPathToList
from DIRAC.Core.Utilities.CFG import CFG
__RCSID__ = "$Id$"
def defaultSection(resource):
"""
Build the path for the Defaults section
"""
return cfgPath(cfgResourceSection, 'Defaults', resource)
def getComputingElementDefaults(ceName='', ceType='', cfg=None, currentSectionPath=''):
"""
Return cfgDefaults with defaults for the given CEs defined either in arguments or in the provided cfg
"""
cesCfg = CFG()
if cfg:
try:
cesCfg.loadFromFile(cfg)
cesPath = cfgInstallPath('ComputingElements')
if cesCfg.isSection(cesPath):
for section in cfgPathToList(cesPath):
cesCfg = cesCfg[section]
except BaseException:
return CFG()
# Overwrite the cfg with Command line arguments
if ceName:
if not cesCfg.isSection(ceName):
cesCfg.createNewSection(ceName)
if currentSectionPath:
# Add Options from Command Line
optionsDict = __getExtraOptions(currentSectionPath)
for name, value in optionsDict.items():
cesCfg[ceName].setOption(name, value) # pylint: disable=no-member
if ceType:
cesCfg[ceName].setOption('CEType', ceType) # pylint: disable=no-member
ceDefaultSection = cfgPath(defaultSection('ComputingElements'))
# Load Default for the given type from Central configuration is defined
ceDefaults = __gConfigDefaults(ceDefaultSection)
for ceName in cesCfg.listSections():
if 'CEType' in cesCfg[ceName]:
ceType = cesCfg[ceName]['CEType']
if ceType in ceDefaults:
for option in ceDefaults[ceType].listOptions(): # pylint: disable=no-member
if option not in cesCfg[ceName]:
cesCfg[ceName].setOption(option, ceDefaults[ceType][option]) # pylint: disable=unsubscriptable-object
return cesCfg
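# Minimal usage sketch (assumes a configured DIRAC installation; the CE name
# and type below are hypothetical):
#
#     cesCfg = getComputingElementDefaults(ceName='ce01.example.org', ceType='HTCondorCE')
#     for ce in cesCfg.listSections():
#         print(ce, cesCfg[ce].listOptions())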
def __gConfigDefaults(defaultPath):
"""
Build a cfg from a Default Section
"""
from DIRAC import gConfig
cfgDefaults = CFG()
result = gConfig.getSections(defaultPath)
if not result['OK']:
return cfgDefaults
for name in result['Value']:
typePath = cfgPath(defaultPath, name)
cfgDefaults.createNewSection(name)
result = gConfig.getOptionsDict(typePath)
if result['OK']:
optionsDict = result['Value']
for option, value in optionsDict.items():
cfgDefaults[name].setOption(option, value)
return cfgDefaults
def __getExtraOptions(currentSectionPath):
"""
Return the options dict of the given section path (empty dict if unavailable)
"""
from DIRAC import gConfig
optionsDict = {}
if not currentSectionPath:
return optionsDict
result = gConfig.getOptionsDict(currentSectionPath)
if not result['OK']:
return optionsDict
return result['Value']
|
chaen/DIRAC
|
ConfigurationSystem/Client/Helpers/ResourcesDefaults.py
|
Python
|
gpl-3.0
| 3,078
|
[
"DIRAC"
] |
608917cfc5d1e05a87eb666129e1bcd404e6895719d1c5c0f135980825d1229e
|
#encoding:utf-8
""" Abstract classes to build Coord, Gr, Ax, AxGr and Field class.
Most classes inherit from the abstract base classes Named, Associative, Directional, Membered, Valued contained in abstract.py.
The abstract module contains the following classes:
Named
-----
Base class for most other sg classes, representing objects with copy and same methods.
Associative
-----------
Associative class that objects with the equiv method can belong to. Two objects will be equivalent if they belong to the same associative class.
Directional
-----------
Base class for derived Coord and Ax classes, representing "direction" (e.g. "latitude" or "depth"). An abstract equivalence relationship is defined among Directional objects where two objects are equivalent when they have the same 'associative' attribute (pointing to an Associative object). This relationship is generally used to indicate whether two Directional objects have the same direction (e.g. X,Y), but could represent other relationships depending on the user.
Membered
--------
Base class for classes containing members such as a grid (Gr) object containing coordinate (Coord) members, or an AxGr object containing Ax objects, e.g. (X, Y).
Valued
------
Base class for classes that contain a ndarray value attribute. The Field class is derived from this.
"""
import numpy as np
import inspect
import copy
import warnings
from utilsg import *
from _config import *
from decorators import check_equiv, method2members, att2members
warnings.formatwarning = warning_on_one_line
# ----- most general classes ------------
class Named(object):
"""
Base class for most other sg classes, representing objects with copy and same methods.
The "same" method indicates when Named objects are "the same", namely when their "name" attribute is the same. This method coincides with the "weaksame" method. "Weaksame" is generally a weaker condition in the derived classes. The "same" method allows the implementation of the "samein" and "sameindex" methods at this abstract level, with generally the "same" method overriden in derived classes.
This class provides a copy method that is used by the derived classes.
Attributes:
name: (str) name of Object
"""
def __init__(self,name='scalar' ,long_name= ''):
"""
Initialisation of Named object.
Args:
name: (str) name of Object
long_name: (str) longer description (e.g. for display)
Returns:
Named object
"""
self.name = name
self.long_name = long_name
def __repr__(self):
return self.name
def copy(self, *args, **kwargs):
"""
Copy method for Named. See __init__ for arguments.
Most child classes should inherit this method.
Returns:
a copy of the Directional object.
Copy methods in sg work as follows: when no value is selected for an argument, a copy of the self attribute will be used. Otherwise, the **kwargs argument value will be used.
"""
# keys to exclude:
forbid = ['self','frame']
# keys value dict to examine:
allow = {key:self.__dict__[key] for key in inspect.getargspec(self.__init__)[0] if key not in forbid }
# new kwargs to use in new object .__init__ construction
new_kwargs = {}
for key in allow:
if key in kwargs:
# if given in kwargs, override
new_kwargs[key] = kwargs[key]
else:
# otherwise use value in self attribute
new_kwargs[key] = self.__dict__[key]
new_kwargs = self._copy_cleanup(**new_kwargs)
# initialize new object:
result = self.__class__(**new_kwargs)
return result
def _copy_cleanup(self, **new_kwargs):
"""
Override this method for specific work to be done before new object init in copy method (e.g. duals in Coord).
"""
return new_kwargs
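# Example of the copy semantics (illustrative):
#     crd = Named(name='xt')
#     crd.copy()           # new Named, name 'xt' (all args taken from self)
#     crd.copy(name='xu')  # new Named, name 'xu' (kwargs override attributes)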
def __and__(self,other):
"""Shorthand for weaksame method. See weaksame.
"""
return self.weaksame(other = other)
def weaksame(self,other):
"""
Tests if two Named objects have the same name.
Weak test to see if two Named objects are similar.
Args:
other: other Named object to compare self with
Returns:
True/ False
**See also**
same method
samein method
sameindex method
"""
if (self.name == other.name):
return True
else:
return False
def same(self,other):
"""Method to check whether this Named object has identical main attributes to argument other.
Placeholder identical to weaksame: to be overriden in child classes.
Args:
other: (Named) to check against
Returns:
True/ False
Attributes checked:
name: via str ==
See also:
samein method
same_index method
"""
return self.weaksame(other)
def samein(self,L):
"""Tests whether this Directional is the same as any element in list L, under 'same' method.
Uses: same method.
Args:
L: (list of Directional objects) to test against
returns:
True/ False
See also:
same method
same_index method
"""
return reduce(lambda x,y:x or y, [self.same(it) for it in L] )
def sameindex(self,L):
"""Find index of this Directional in list L of Directional objects, under 'same' method.
Uses: same method.
Args:
L: (list of directional objects) to search
returns:
None or Integer, the index of the first item it in the list that satisfies self.same(it)
See also:
same method
samein method
"""
for i,it in enumerate(L):
if self.same(it):
return i
return None
def json(self, types_allow = []):
"""convert self to a json friendly object.
Usage: json.dumps(X.json())
"""
# class encoding doesn't work yet:
return_dict = {'class':str( type(self) )}
for k in self.__dict__:
ob_type = type(self.__dict__[k])
if ob_type in types_allow:
# this is the nested case on which to call the method recursively
return_dict[k] = self.__dict__[k].json(types_allow= [t for t in types_allow if t != ob_type ] )
else:
if isinstance(self.__dict__[k] , np.ndarray ):
return_dict[k] = self.__dict__[k].tolist()
else:
return_dict[k] = self.__dict__[k].__repr__()
return return_dict
class Associative(Named):
"""
Associative class that objects with the equiv method can belong to.
Two objects will be equivalent if they belong to the same associative class.
For Coord objects, this should remain consistent with the axis attribute: two Coord objects belong to the same associative class iff they have the same axis attribute. In this case, the associative class of Coord objects is effectively their direction or axis. The mechanisms for the two remain independent.
For classes using Associative as equivalence principle:
Their copy method should carry over the Associative object of the parent.
Their make_equiv method should make the associate the Associative object of the argument equal to the Associative object of the calling object.
Their __init__ method should create a new Associative class as default behaviour, and assign an argument Associative class if given.
"""
def __init__(self,name):
self.name = name
self.associative = self
class Directional(Named):
"""
Base class for derived Coord and Ax classes, representing "direction" (e.g. "latitude" or "depth").
An abstract equivalence relationship is defined among Directional objects where two objects are equivalent when they have the same 'associative' attribute (pointing to an Associative object). This relationship is generally used to indicate whether two Directional objects have the same direction (e.g. X,Y), but could represent other relationships depending on the user.
The same method is differentiated from the weaksame method (unlike the parent class), with the more strict additional condition that in addition to "name", the "direction" attribute also needs to be the same. therefore, two Directional objects are considered "same" when both "name" and "direction" match. They are "weaksame" when only the "name" matches.
This base class is closely related to the Ax class. The Coord class is also derived from it.
Attributes:
name: (str) name of Object
direction: (str) name of direction in which object points
long_name: (str) longer description (e.g. for display or in Netcdf)
"""
def __repr__(self):
"""Display alias if attribute present, name otherwise.
"""
if hasattr(self,'alias'):
return self.alias
else:
return self.name
def __init__(self,name='scalar',direction ='scalar',long_name= '', associative = None ):
"""
Initialisation of Directional object.
Args:
name: (str) name of Object
direction: (str) name of direction in which object points
long_name: (str) longer description (e.g. for display or in Netcdf)
associative: (Directional or Associative) object that this new object is equivalent to, or its Associative
Returns:
Directional object
Raises:
ValueError if associative has no associative attribute
"""
# choosing the name ID creates an identity object. ID*b = b for all Coord elements b.
# could implement the identity Field in __call__
# Metric could be a class. Objects of this class could be constructed by a method of the Coord class (Coord objects then spawn metric objects).
self.equivs = [self]
self.name = name
self.direction = direction
self.long_name = long_name
if associative is None:
self.associative = Associative(self.name+'_assoc')
else:
if hasattr(associative, 'associative'):
self.associative = associative.associative
else:
raise ValueError('provide object with associative attribute for associative.')
def __neg__(self):
"""
To be overriden for now. Could introduce +-1 here.
"""
pass
def same(self,other):
"""Method to check whether this Directional object has identical main attributes to argument other.
Overrides Name class same method.
Args:
other: (Directional) to check against
Returns:
True/ False
Attributes checked:
name: via str ==
direction: via str ==
See also:
samein method
same_index method
"""
return (self.name == other.name) and (self.direction == other.direction)
# ----- equivalence related ---------
def make_equiv(self,other):
"""
Register equivalence of two Directional objects.
Args:
other: (Directional)
Returns:
None
See also:
is_equiv
eq_index
eq_in
Examples:
>>> depth.is_equiv(longitude) # generally different directions.
False
>>> depth.make_equiv(longitude) # don't do this in real work
>>> depth.is_equiv(longitude) # unphysically:
True
"""
other.associative = self.associative
return
def is_equiv(self,other, checks = False):
"""
Test for equivalence (under make_equiv) between Directional objects. e.g. xt is equivalent to xu
Args:
other: (Coord or Ax)
Returns:
True when equivalent, False otherwise.
Examples:
>>> depth.is_equiv(longitude) # generally different directions.
False
See also:
make_equiv
eq_index
eq_in
"""
# if (other in self.equivs) | (self in other.equivs):
if self.associative is other.associative:
return True
else:
# Warnings helpful for debugging and spotting potential problems:
if checks is True:
try:
if ( self.same(other) ):
warnings.warn('Warning (severe): %s.is_equiv(%s) is False, but %s.same(%s) is True! ' % (self,other,self,other) )
elif ( self.weaksame(other) ):
warnings.warn('Warning (severe): %s.is_equiv(%s) is False, but %s.weaksame(%s) is True! ' % (self,other,self,other) )
except Exception:
warnings.warn('Consistency check failed.')
# then go about normal business:
return False
def eq_index(self, collection ):
""" Find index of first element in collection equivalent to self under is_equiv.
Args:
collection: (e.g. List) order collection of Directional objects
Returns:
Integer if equivalent found (index to equiv element), None otherwise.
See also:
make_equiv
is_equiv
eq_in
"""
for i, e in enumerate(collection):
if self.is_equiv(e):
return i
return
def eq_in(self, collection):
""" Determines whether Coord (self) is equivalent to any of the constituent Coord objects of the argument Gr or GrAx, and returns equivalent object.
Uses: eq_index
Args:
collection: (Gr or AxGr) object to be checked.
Returns:
The equivalent object when crd is equivalent to one of the Coord objects in argument, None otherwise.
See also:
eq_in method of Ax, GrAx
is_equiv
make_equiv
eq_index
eq_in
"""
i = self.eq_index(collection)
if i is not None:
return collection[i ]
else:
return
# Belongs to Directional
def __or__(self,other):
"""
Shorthand calling make_equiv (to register equivalence with other Directional object).
Args:
other: (Directional)
Returns:
None
"""
self.make_equiv(other)
def __xor__(self,other):
"""
Shorthand calling is_equiv (to test equivalence with other Directional object).
Args:
other: (Directional)
Returns:
None
"""
return self.is_equiv(other)
# ----- multiplication related ---------
def __pow__(self,n):
"""
Repeated multiplication of object with itself.
"""
return reduce(lambda x,y: x*y, n*[self])
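# e.g. X**3 expands to X*X*X via reduce, assuming the derived class defines
# __mul__; n must be a positive integer (reduce over an empty list raises).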
class Membered(Named):
"""
Base class for classes containing members such as a grid (Gr) object containing coordinate (Coord) members, or an AxGr object containing Ax objects, e.g. (X, Y).
This class is intended for multiple inheritance with classes that provide container functionality. For example Tuple. The elements inside the tuples are then referred to as "members", hence the name of this class. Methods relate to general operations with these members. Note that more specific member-related methods are relegated to derived classes.
__init__ method of joint-inheritance class must take a container of elements.
"""
def same(self,other):
"""
Member-wise same comparison.
"""
if len(self) == len(other):
for i,c in enumerate(self):
if not(c.same(other[i]) ):
return False
return True
else:
return False
def weaksame(self,other):
"""
Member-wise weaksame comparison.
"""
if len(self) == len(other):
for i,c in enumerate(self):
if not(c.weaksame(other[i]) ):
return False
return True
else:
return False
def call_on_members(self, method, *args, **kwargs):
"""
Call method on all members and construct new Membered object.
"""
return self.__class__( [ getattr(member, method)(*args, **kwargs) for member in self ] )
def get_from_members(self, att_name):
"""
Fetch the named attribute from all members and construct a new Membered object.
"""
return self.__class__( [ getattr(member, att_name) for member in self ] )
def __and__(self,other):
"""
Shorthand to member-wise weaksame comparison.
"""
return self.weaksame(other)
@method2members
def __neg__(self):
"""
Call __neg__ on members and return corresponding Membered object
(behaviour supplied by the method2members decorator).
"""
pass
def reverse(self):
"""
Reverse the order of the members.
Examples:
>>> coord1 = sg.fieldcls.Coord(name = 'test1',direction ='X',value =np.array([1.,2.,3.]) )
>>> coord2 = sg.fieldcls.Coord(name = 'test2',direction ='Y',value =np.array([1.,2.,3.,4.]) )
>>> (coord1*coord2).reverse()
(test2, test1)
"""
return self.__class__([ self[len(self) -i -1 ] for i in range(len(self)) ])
@method2members
def copy(self,*args,**kwargs):
"""
Member-wise copy method.
"""
pass
def strict_equiv(self, other):
"""
Tests whether two Membered objects have equivalent Members at each position.
This is a stricter test than Membered equivalence testing via gr1.is_equiv(gr2), which only tests whether both Membered objects describe the same linear space (elements equivalent up to a permutation).
"""
if len(self) == len(other):
RO = True
for i,it in enumerate(self):
RO *= (it.is_equiv(other[i] ) )
return bool(RO)
else:
return False
def __xor__(self, other):
"""
Shorthand for is_equiv.
"""
return self.is_equiv(other)
def is_equiv(self, other):
"""
Checks member-wise equivalence between Membered objects up to a permutation.
For grids, objects are equivalent if they define the same physical subspace, based on the equivalence definition for Coord classes. In other words, checks whether the individual Coord elements of the two grid (Gr object) arguments are equivalent up to a permutation. A stricter version of this test is strict_equiv, which allows no permutation.
Args:
other: (Membered) the object to compare with
Returns:
True if all (self) members are equivalent to a member of other and vice versa and both have equal length. False otherwise
"""
if len(self) == len(other):
if self.eq_perm(other):
return True
return False
else:
return False
def eq_in(self, member):
""" Determines whether argument is equivalent to any of the constituent members.
Args:
member: (Directional) object to be checked.
Returns:
True when member is equivalent to one of the member objects, False otherwise.
See also:
eq_in method of Coord
"""
for i in self:
if member.is_equiv(i): return True
return False
def eq_index(self,member):
"""
Returns index of argument in members.
"""
for i,v in enumerate(self):
if member.is_equiv(v): return i
return -1
def rearrange(self,permutation):
"""
Rearranges the order of the members of this object via the permutation argument.
Args:
permutation: (List or Tuple) permutation to rearrange by
Returns:
object of same type as self with member rearranged.
Examples:
>>> g1 = latitude*depth
>>> g1.rearrange( (1,0) )
(depth, latitude)
See also Gr.perm method
"""
return self.__class__((self[i] for i in permutation))
def perm(self, other,verbose = False):
"""
yields permutation of axes going from self to other.
E.g. for grids g1 and g2, g2 = g1.rearrange( g1.perm(g2) )
Returns None if no permutation exists.
See also rearrange.
"""
return find_perm(self,other,verbose = verbose)
def eq_perm(self, other, verbose = True):
"""
Yields permutation of members going from self to other, where equivalent members are treated as identical.
See also perm.
"""
if len(self) == len(other):
perm = []
for r in other:
if self.eq_in(r):
perm.append(self.eq_index(r))
else:
warnings.warn( 'Warning from eq_perm (often benign): inputs not permutable, returning None.')
return
else:
if verbose:
print "Message from eq_perm: inputs must be of equal length."
return
return tuple(perm)
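# Relationship between the permutation methods (illustrative Coord names):
#     g1 = latitude*depth; g2 = depth*latitude
#     p = g1.eq_perm(g2)                 # -> (1, 0)
#     g1.rearrange(p).strict_equiv(g2)   # -> True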
def json(self, types_allow = []):
"""convert self to a json friendly object.
Usage: json.dumps(X.json())
"""
members = [member.json(types_allow=types_allow) for member in self]
return members
class Valued(Named):
"""
Base class for classes that contain a ndarray value attribute.
This class derives its name from the presence of an attribute named "value" that contains a Numpy ndarray. Methods relate to this attribute.
"""
def __init__(self,name='scalar',value = np.array([0]),long_name =''):
"""
Initialisation of Valued object.
Args:
name: (str) name of object.
value: (Numpy ndarray)
long_name: (str) a longer description.
"""
self.name = name
self.value = value
self.long_name = long_name
self.shape = value.shape
def __repr__(self):
"""Display alias if attribute present, name otherwise.
"""
if hasattr(self,'alias'):
return self.alias
else:
return self.name
def get_value(self,i):
return self.value[i]
def set_value(self, value):
self.value = value
def __getitem__(self,i):
"""Obtain item from value atttribute.
"""
return self.get_value(i)
def __setitem__(self, i, value):
self.value[i] = value
def sliced(self,slice_obj = None ,suffix = '_sliced'):
"""Create new sliced Valued object with sliced value.
The slice argument must match the value dimensions, there are no checks.
Args:
slice_obj: (slice objects or tuple of) to slice value with
suffix: (str) suffix to use for sliced Valued object
Returns:
Valued object containing sliced value
"""
return self.copy(name = affix(self.name, suffix) , value = self.value[slice_obj] )
def array_equal(self,other):
""" test whether Valued objects contain identically valued ndarrays in value attributes.
This is a common method that should be inherited by child classes.
Args:
other: (Valued object) the Valued to compare with
Returns:
True/ False (using np.array_equal)
Raises:
TypeError: when argument is not a Valued object
"""
if not isinstance(other,Valued):
raise TypeError('Error: provide Valued argument (%s provided).'%other)
return np.array_equal(self.value,other.value)
def same(self,other):
"""
Tests whether this Valued object contains identical name and value to argument object.
Overrides Named same method and is a stronger condition. Generally to be overriden in child classes.
Args:
other: (Valued) object to compare against.
Returns:
True/ False
"""
# the following check and warning let us avoid tripping over small inconsistencies with hard errors:
# self is a Valued, so it has a value attribute (this should be put into the specs!), but other might not:
if not hasattr(other, 'value'):
warnings.warn('Valued method %s.same(%s) on argument without value attribute: returning False.'%(self.name, other.name))
return False
return (self.name == other.name) and self.array_equal(other)
def __neg__(self):
"""
Obtain version of Valued with negative values (e.g. -xt).
Returns:
Valued copy with value is -self.value
"""
return self.copy(value =-self.value)
def __pow__(self,n):
"""
Repeated multiplication of object with itself.
"""
return reduce(lambda x,y: x*y, n*[self])
def __add__(self, other):
""" Addition of value attributes
"""
return self.copy(value = self.value + other.value)
def __sub__(self, other):
""" Substraction of value attributes
"""
return self.copy(value = self.value - other.value)
def __mul__(self, other):
""" Multiplication of value attributes
"""
return self.copy(value = self.value*other.value)
def __div__(self, other):
""" Division of value attributes
"""
return self.copy(value = self.value/other.value)
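# The arithmetic operators above combine value attributes element-wise, e.g.
#     a = Valued(name='a', value=np.array([1., 2.]))
#     b = Valued(name='b', value=np.array([3., 4.]))
#     (a + b).value   # -> array([4., 6.]); name and long_name come from a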
|
willo12/spacegrids
|
spacegrids/abstract.py
|
Python
|
bsd-3-clause
| 23,918
|
[
"NetCDF"
] |
4732abda7b3eb2dc51a062d1ca52cb58a2a55e5c29ba53a8ab5a767244393f6c
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates reports base on bisect result data."""
import copy
import math
_BISECT_HEADER = """
=== BISECT JOB RESULTS ===
<b>%s</b>
"""
_BISECT_TO_RUN = """
To Run This Test
%(command)s
"""
_BISECT_DEBUG_INFO = """
Debug Info
%(issue_url)s
"""
_BISECT_TRY_JOB = """
Is this bisect wrong?
https://chromeperf.appspot.com/bad_bisect?try_job_id=%(_tryjob_id)s
"""
_MEMORY_BENCHMARKS = [
'system_health.memory_',
'memory.top_10_mobile'
]
_MEMORY_DOC_URL = ('https://chromium.googlesource.com/chromium/src/+/'
'master/docs/memory-infra/memory_benchmarks.md')
_BISECT_MEMORY_DOC_INFO = """
Please refer to the following doc on diagnosing memory regressions:
%s
""" % _MEMORY_DOC_URL
_BISECT_FOOTER = """
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \\ | file a bug with component Speed>Bisection. Thank you!"""
_BISECT_SUSPECTED_COMMIT = """
Suspected Commit
Author : %(author)s
Commit : %(cl)s
Date : %(cl_date)s
Subject: %(subject)s
"""
_BISECT_SUSPECTED_RANGE = """
Suspected Commit Range
%(num)d commits in range
"""
_BISECT_SUSPECTED_RANGE_URL = " %(url)s%(lkgr)s..%(fkbr)s\n"
_BISECT_SUSPECTED_RANGE_MISMATCH =\
""" Mismatching LKGR/FKBR depots, unable to provide handy url.
good_revision: %(lkgr)s
bad_revision : %(fkbr)s
"""
_BISECT_SUSPECTED_RANGE_UNSUPPORTED =\
" Unknown depot, please contact team to have this added.\n"
_BISECT_DETAILS = """
Bisect Details
Configuration: %(bisect_bot)s
Benchmark : %(benchmark)s
Metric : %(metric)s
"""
_BISECT_DETAILS_CHANGE =\
' Change : %(change)s | %(good_mean)s -> %(bad_mean)s\n'
_BISECT_WARNING_HEADER =\
'The following warnings were raised by the bisect job:\n'
_BISECT_WARNING = ' * %s\n'
_REVISION_TABLE_TEMPLATE = """
%(table)s"""
COMMIT_RANGE_URL_BY_DEPOT = {
'chromium': 'https://chromium.googlesource.com/chromium/src/+log/',
'angle': 'https://chromium.googlesource.com/angle/angle/+log/',
'v8': 'https://chromium.googlesource.com/v8/v8.git/+log/',
'skia': 'https://chromium.googlesource.com/skia/+log/',
}
STATUS_REPRO_WITH_CULPRIT = '%(test_type)s found with culprit'
STATUS_REPRO_UNABLE_NARROW =\
'%(test_type)s found but unable to narrow commit range'
STATUS_REPRO_BUT_UNDECIDED = \
'%(test_type)s found but unable to continue'
STATUS_NO_REPRO = 'NO %(test_type)s found'
STATUS_NO_VALUES = 'NO %(test_type)s found, tests failed to produce values'
STATUS_FAILED_UNEXPECTED = 'Bisect failed unexpectedly'
STATUS_INCOMPLETE = 'Bisect was unable to run to completion'
STATUS_UNKNOWN = 'Bisect failed for unknown reasons'
STATUS_IN_PROGRESS = 'Bisect is still in progress, results below are incomplete'
STATUS_TYPE_IN_PROGRESS = 'in_progress'
STATUS_TYPE_STARTED = 'started'
MESSAGE_REPRO_BUT_UNDECIDED = """
Bisect was stopped because a commit couldn't be classified as either
good or bad."""
MESSAGE_CONTACT_TEAM = """
Please contact the team (see below) and report the error."""
MESSAGE_FAILED_UNEXPECTED = """
Bisect was aborted with the following:
%s"""
MESSAGE_INCOMPLETE = """%s
If failures persist contact the team (see below) and report the error."""
MESSAGE_FAILURE_REASON = "Error: %(failure_reason)s"
MESSAGE_RERUN = """
Please try rerunning the bisect.
"""
MESSAGE_RERUN_FROM_PARTIAL_RESULTS = """
The bisect was able to narrow the range, you can try running with:
good_revision: %(lkgr)s
bad_revision : %(fkbr)s"""
MESSAGE_REPRO_BUILD_FAILURES = """
Build failures prevented the bisect from narrowing the range further."""
_NON_TELEMETRY_TEST_COMMANDS = {
'angle_perftests': 'angle_perftests',
'cc_perftests': 'cc_perftests',
'idb_perf': 'performance_ui_tests',
'load_library_perf_tests': 'load_library_perf_tests',
'media_perftests': 'media_perftests',
'performance_browser_tests': 'performance_browser_tests',
'resource_sizes': 'resource_sizes.py',
}
def _GuessBenchmarkFromRunCommand(run_command):
if 'run_benchmark' in run_command:
return run_command.split()[-1]
for k, v in _NON_TELEMETRY_TEST_COMMANDS.iteritems():
if v in run_command:
return k
return '???'
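# e.g. (illustrative command lines):
#     _GuessBenchmarkFromRunCommand('tools/perf/run_benchmark -v smoothness.top_25')
#         -> 'smoothness.top_25'
#     _GuessBenchmarkFromRunCommand('out/Release/cc_perftests') -> 'cc_perftests'
#     _GuessBenchmarkFromRunCommand('foo') -> '???'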
def _WasCommitTested(commit):
return commit.get('failed') or commit.get(
'n_observations', len(commit.get('values', [])))
def _GenerateReport(results_data):
revision_data = results_data.get('revision_data', [])
lkgr_index = -1
fkbr_index = -1
lkgr = {}
fkbr = {}
for i in xrange(len(revision_data)):
r = revision_data[i]
if r.get('result') == 'good':
lkgr_index = i
lkgr = revision_data[i]
if r.get('result') == 'bad':
fkbr_index = i
fkbr = revision_data[i]
break
test_type = 'Perf regression'
if results_data.get('test_type') == 'return_code':
test_type = 'Test failure'
# Generally bisects end a few ways:
# 1 - Success, found a culprit
# 2 - Unexpected failure, bisect aborts suddenly with an exception
# 3 - Interrupted, bisect didn't finish and we only got partial results
# 4 - Semi-success, found a range but couldn't narrow further
message = STATUS_UNKNOWN
message_details = ''
# 1 - Easiest case, bisect named a culprit.
if results_data.get('culprit_data'):
message = STATUS_REPRO_WITH_CULPRIT
# 2 - Unexpected failure in the recipe, could be a master restart, exception
# thrown, etc.
if message == STATUS_UNKNOWN:
aborted_reason = results_data.get('aborted_reason', '')
if aborted_reason:
# TODO(simonhatch): Ideally the recipe would only set the "aborted"
# field on an unexpected failure. We have to wait until the dashboard
# changes are live to remove that, so for now we'll just filter those.
if ('The metric values for the initial' in aborted_reason or
'Bisect failed to reproduce the regression' in aborted_reason):
message = STATUS_NO_REPRO
elif ('No values were found while testing' in aborted_reason or
'Test runs failed to produce output' in aborted_reason):
message = STATUS_NO_VALUES
elif 'Bisect cannot identify a culprit' in aborted_reason:
message = STATUS_REPRO_BUT_UNDECIDED
message_details = MESSAGE_REPRO_BUT_UNDECIDED
else:
message = STATUS_FAILED_UNEXPECTED
message_details = MESSAGE_FAILED_UNEXPECTED % aborted_reason
# 3 - Incomplete bisects, try to print out a useful narrowed range and ask
# them to try rerunning.
if message == STATUS_UNKNOWN:
if (results_data.get('status') == STATUS_TYPE_STARTED or
results_data.get('status') == STATUS_TYPE_IN_PROGRESS):
# Try to provide some useful info on where to restart the bisect from
rerun_info = MESSAGE_RERUN
if lkgr_index > 0 or fkbr_index < (len(revision_data) - 1):
if lkgr.get('depot_name') == fkbr.get('depot_name'):
rerun_info = MESSAGE_RERUN_FROM_PARTIAL_RESULTS % {
'lkgr': lkgr.get('commit_hash'),
'fkbr': fkbr.get('commit_hash'),
}
if results_data.get('failure_reason'):
failure_reason = MESSAGE_FAILURE_REASON % results_data
rerun_info = '\n%s\n%s' % (failure_reason, rerun_info)
if results_data.get('status') == STATUS_TYPE_STARTED:
message = STATUS_INCOMPLETE
message_details = MESSAGE_INCOMPLETE % rerun_info
else:
message = STATUS_IN_PROGRESS
message_details = rerun_info
# 4 - Semi-successful in that they were able to run the tests, but failed to
# either repro the regression or narrow it to a single commit.
if message == STATUS_UNKNOWN:
if revision_data:
commits = revision_data[lkgr_index+1:fkbr_index-1]
if lkgr_index == 0 and fkbr_index == len(revision_data) - 1:
# The bisect never got past the initial testing.
if (lkgr.get('n_observations') == 0 and
fkbr.get('n_observations') == 0):
message = STATUS_NO_VALUES
else:
if all([_WasCommitTested(c) for c in commits]):
message = STATUS_REPRO_UNABLE_NARROW
else:
message = STATUS_NO_REPRO
else:
message = STATUS_REPRO_UNABLE_NARROW
if message == STATUS_REPRO_UNABLE_NARROW:
if all([c.get('failed') for c in commits]):
message_details = MESSAGE_REPRO_BUILD_FAILURES
# No idea what happened, ask them to file a bug.
if message == STATUS_UNKNOWN:
message_details = MESSAGE_CONTACT_TEAM
# Start constructing the full output.
result = ''
result += _BISECT_HEADER % (message % {'test_type': test_type})
if message_details:
result += '%s\n\n' % message_details
warnings = results_data.get('warnings')
if warnings:
result += _BISECT_WARNING_HEADER
for w in warnings:
result += _BISECT_WARNING % w
result += '\n'
results_data['benchmark'] = _GuessBenchmarkFromRunCommand(
results_data.get('command'))
# Print out the suspect commit info
if results_data.get('culprit_data'):
result += _BISECT_SUSPECTED_COMMIT % results_data.get('culprit_data')
result += _BISECT_DETAILS % results_data
if results_data.get('test_type') == 'perf':
results_data['good_mean'] = None
results_data['bad_mean'] = None
for r in results_data.get('revision_data', []):
if r.get('commit_hash') == results_data.get('good_revision'):
results_data['good_mean'] = r.get('mean_value')
if r.get('commit_hash') == results_data.get('bad_revision'):
results_data['bad_mean'] = r.get('mean_value')
if results_data['good_mean'] and results_data['bad_mean']:
result += _BISECT_DETAILS_CHANGE % results_data
# If we're unable to narrow for whatever reason, try to print out a link to
# a log containing all entries in the suspected range.
if message == STATUS_REPRO_UNABLE_NARROW:
depot_name = lkgr.get('depot_name')
depot_url = COMMIT_RANGE_URL_BY_DEPOT.get(depot_name)
result += _BISECT_SUSPECTED_RANGE % {'num': fkbr_index - lkgr_index}
if depot_url and lkgr.get('depot_name') == fkbr.get('depot_name'):
result += _BISECT_SUSPECTED_RANGE_URL % {
'url': depot_url,
'lkgr': lkgr.get('commit_hash'),
'fkbr': fkbr.get('commit_hash')}
elif not depot_url:
result += _BISECT_SUSPECTED_RANGE_UNSUPPORTED
else:
result += _BISECT_SUSPECTED_RANGE_MISMATCH % {
'lkgr': '%s@%s' % (
lkgr.get('depot_name'), lkgr.get('commit_hash')),
'fkbr': '%s@%s' % (
fkbr.get('depot_name'), fkbr.get('commit_hash'))}
result += '\n'
# Print out a nice table of all the tested commits.
if results_data.get('revision_data'):
result += _RevisionTable(results_data)
# Print out common footer stuff for all bisects, info like the command line,
# and how to contact the team.
result += '\n'
# (github:3128): Requested that all memory benchmarks include a doc url.
# TODO(eakuefner): Replace this with a generic property in TestMetadata
# when data pipe is available.
if any(results_data['benchmark'].startswith(b) for b in _MEMORY_BENCHMARKS):
result += _BISECT_MEMORY_DOC_INFO
result += _BISECT_TO_RUN % results_data
result += _BISECT_DEBUG_INFO % results_data
if '_tryjob_id' in results_data:
result += _BISECT_TRY_JOB % results_data
result += '\n'
result += _BISECT_FOOTER
return result
def GetReport(try_job_entity, in_progress=False):
"""Generates a report for bisect results.
This was ported from recipe_modules/auto_bisect/bisect_results.py.
Args:
try_job_entity: A TryJob entity.
Returns:
Bisect report string.
"""
results_data = copy.deepcopy(try_job_entity.results_data)
if not results_data:
return ''
# This is an in-progress bisect, and we want it to display a message
# indicating so.
if in_progress:
results_data['status'] = STATUS_TYPE_IN_PROGRESS
if try_job_entity.bug_id > 0:
results_data['_tryjob_id'] = try_job_entity.key.id()
return _GenerateReport(results_data)
def _MakeLegacyRevisionString(r):
result = 'chromium@' + str(r.get('commit_pos', 'unknown'))
if r.get('depot_name', 'chromium') != 'chromium':
result += ',%s@%s' % (r['depot_name'], r.get('deps_revision', 'unknown'))
return result
def _RevisionTable(results_data):
is_return_code = results_data.get('test_type') == 'return_code'
culprit_commit_hash = None
if 'culprit_data' in results_data and results_data['culprit_data']:
culprit_commit_hash = results_data['culprit_data']['cl']
# Only display some rows depending on whether they're part of the failure or
# regression.
last_good = 0
first_bad = len(results_data['revision_data'])
for i in xrange(len(results_data['revision_data'])):
r = results_data['revision_data'][i]
if r['result'] == 'good':
last_good = i
if r['result'] == 'bad':
first_bad = i
break
revision_rows = []
for i in xrange(len(results_data['revision_data'])):
r = results_data['revision_data'][i]
number_of_observations = r.get(
'n_observations', len(r.get('values', [])) or None)
result = None
if not r.get('failed') and number_of_observations:
result = [
r.get('revision_string', _MakeLegacyRevisionString(r)),
'%s +- %s' % (
_FormatNumber(r['mean_value']),
_FormatNumber(r['std_dev'])),
_FormatNumber(number_of_observations),
r['result'],
'<--' if r['commit_hash'] == culprit_commit_hash else '',
]
elif r.get('failed'):
# Outside the culprit range we don't care about displaying build failures.
if i > last_good and i < first_bad:
if first_bad - last_good > 10:
if i == last_good + 1 or i == first_bad - 1:
result = [
r.get('revision_string', _MakeLegacyRevisionString(r)),
'---',
'---',
'build failure',
'',
]
elif i == last_good + 2:
# Inside the culprit range, if there were more than 10 failures,
# just mention they all failed.
result = [
'---',
'---',
'---',
'too many build failures to list',
'',
]
else:
result = [
r.get('revision_string', _MakeLegacyRevisionString(r)),
'---',
'---',
'build failure',
'',
]
if result:
revision_rows.append(result)
revision_rows = [map(str, r) for r in revision_rows if r]
if not revision_rows:
return ''
headers_row = [[
'Revision',
'Result' if not is_return_code else 'Exit Code',
'N',
'',
'',
]]
all_rows = headers_row + revision_rows
return _REVISION_TABLE_TEMPLATE % {'table': _PrettyTable(all_rows)}
def _FormatNumber(x):
if x is None:
return 'N/A'
if isinstance(x, int) or x == 0:
return str(x)
if x >= 10**5:
# It's a little awkward to round 123456789.987 to 123457000.0,
# so just make it 123456790.
return str(int(round(x)))
# Round to 6 significant figures.
return str(round(x, 5-int(math.floor(math.log10(abs(x))))))
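# Worked example: for x = 123.456, log10(|x|) ~ 2.09, floor gives 2, so the
# value is rounded to 5 - 2 = 3 decimal places, i.e. six significant figures.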
def _PrettyTable(data):
column_lengths = [max(map(len, c)) for c in zip(*data)]
formatted_rows = []
for row in data:
formatted_elements = []
for element_length, element in zip(column_lengths, row):
formatted_elements.append(element.ljust(element_length))
formatted_rows.append(' '.join(formatted_elements).strip())
return '\n'.join(formatted_rows)
|
sahiljain/catapult
|
dashboard/dashboard/bisect_report.py
|
Python
|
bsd-3-clause
| 15,882
|
[
"VisIt"
] |
fb6225702dc463ec64afa1de04c02388a5e6eb267d5e396ac16e4ca7316db41e
|
__author__ = 'amarch'
# -*- coding: utf-8 -*-
from utils import strutils as infoutils
import itertools
import copy
from scipy.integrate import *
from RotationCurve import *
from Galaxy import *
from RadialToAzimuthalRatioHandler import *
import scipy.optimize
class RadialToVerticalRatioHandler(object):
def __init__(self, galaxy):
self.galaxy = galaxy
self.sigZ_to_sigR = 0.0
self.sig_R_0 = 0.0
def residuals(self, params, xdata, ydata):
return (ydata - numpy.dot(xdata, params))
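# The linear model solved throughout this class (notation is illustrative):
#     sig_los^2 ~ p0*f(R) + p1*g(R)
# with p0 = (sig_R_0*sin i)^2 and p1 = (alpha*sig_R_0*cos i)^2, so that
# sig_R_0 = sqrt(p0)/sin i and alpha = sigZ/sigR = sqrt(p1/p0)*tan i,
# which is exactly the extraction applied to `solution` below.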
def experimental_alpha_evaluation(self, normalize=False):
r_eff = self.galaxy.r_eff
x0 = [0.3, 0.3]
sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
points = map(lambda p: [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
# points = filter(lambda p: p[0] > r_eff, points)
points.sort()
radii = [p[0] for p in points]
if normalize:
ydata = numpy.concatenate(([sig_max],[(p[1]**2)/(self.norm_sig_los_mi(p[0])**2) for p in points]))
xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
numpy.concatenate(([1.0],[1.0 for x in radii]))]))
else:
ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points]))
xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2) for x in radii]))]))
solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
print 'Solution: <',solution[0],' : ',solution[1],'>'
if solution[0] > 0 and solution[1] > 0:
tan = math.tan(self.galaxy.incl*math.pi/180.0)
sin = math.sin(self.galaxy.incl*math.pi/180.0)
self.sig_R_0 = math.sqrt(solution[0])/sin
self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
print 'sig_R_0: ', self.sig_R_0
print 'sigZ/sigR: ', self.sigZ_to_sigR
# self.set_sigZ_to_sigR(0.19)
def experimental_alpha_evaluation2(self, normalize=False):
r_eff = self.galaxy.r_eff
x0 = [0.3, 0.3]
sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
radii_range = [abs(x[0]) for x in self.galaxy.sig_los_ma.data_points]
# points = map(lambda p: [abs(p[0]), self.galaxy.sig_los_ma.bezier(abs(p[0]))], self.galaxy.sig_los_ma.data_points)
points = map(lambda p: [abs(p), self.galaxy.sig_los_ma.bezier(abs(p))], numpy.arange(min(radii_range), max(radii_range), 0.1).tolist())
points = filter(lambda p: p[0] > r_eff, points)
points.sort()
radii = [p[0] for p in points]
if normalize:
ydata = numpy.concatenate(([sig_max],[(p[1]**2)/(self.norm_sig_los_mi(p[0])**2) for p in points]))
xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
numpy.concatenate(([1.0],[1.0 for x in radii]))]))
else:
ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points]))
xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2) for x in radii]))]))
solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
print 'Solution: <',solution[0],' : ',solution[1],'>'
if solution[0] > 0 and solution[1] > 0:
tan = math.tan(self.galaxy.incl*math.pi/180.0)
sin = math.sin(self.galaxy.incl*math.pi/180.0)
self.sig_R_0 = math.sqrt(solution[0])/sin
self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
print 'sig_R_0: ', self.sig_R_0
print 'sigZ/sigR: ', self.sigZ_to_sigR
# self.set_sigZ_to_sigR(0.19)
def experimental_alpha_evaluation3(self):
r_eff = self.galaxy.r_eff
x0 = [0.3, 0.3]
sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
points_ma = map(lambda p: [abs(p[0]), self.galaxy.sig_los_ma.bezier(abs(p[0]))], self.galaxy.sig_los_ma.data_points)
# points_ma = filter(lambda p: p[0] > r_eff, points_ma)
points_ma.sort()
radii_ma = [p[0] for p in points_ma]
points_mi = map(lambda p: [abs(p[0]), p[1]], self.galaxy.sig_los_mi.data_points)
# points_mi = filter(lambda p: p[0] > r_eff, points_mi)
points_mi.sort()
radii_mi = [p[0] for p in points_mi]
ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points_ma], [p[1]**2 for p in points_mi]))
xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],
[(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii_ma],
[(self.galaxy.sig_los_mi.bezier(x)**2)/sig_max for x in radii_mi])),
numpy.concatenate(([1.0],
[(self.norm_sig_los_mi(x)**2) for x in radii_ma],
[(self.galaxy.sig_los_mi.bezier(x)**2)/sig_max for x in radii_mi]))]))
solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
print 'Solution: <',solution[0],' : ',solution[1],'>'
if solution[0] > 0 and solution[1] > 0:
tan = math.tan(self.galaxy.incl*math.pi/180.0)
sin = math.sin(self.galaxy.incl*math.pi/180.0)
self.sig_R_0 = math.sqrt(solution[0])/sin
self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
print 'sig_R_0: ', self.sig_R_0
print 'sigZ/sigR: ', self.sigZ_to_sigR
def set_sigZ_to_sigR(self, alpha):
self.sigZ_to_sigR = alpha  # use the passed ratio instead of a hardcoded constant
self.sig_R_0 = self.galaxy.sig_los_mi.bezier(0.0)/math.sqrt(math.sin(self.galaxy.incl*math.pi/180.0)**2 +
(self.sigZ_to_sigR*math.cos(self.galaxy.incl*math.pi/180.0))**2)
def norm_sig_los_mi(self, x):
sig_max = self.galaxy.sig_los_mi.bezier(0.0)
return self.galaxy.sig_los_mi.bezier(x)/sig_max
def plot_residuals(self):
# plt.plot([0.0] + radii, map(abs, self.residuals((solution[0], solution[1]), xdata, ydata)), 'x-')
pass
def plot_sig_R(self):
points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
points.sort()
radii = [p[0] for p in points]
plt.plot(radii, [self.sig_R(x) for x in radii], 'x-', label=(r'$\sigma_{R}^{\alpha=%s}$' % self.sigZ_to_sigR))
def sig_R(self, x):
return self.sig_R_0*self.norm_sig_los_mi(x)
def sig_Z(self, x):
return self.sigZ_to_sigR*self.sig_R(x)
def sig_Phi(self, x):
return self.sig_R(x)*self.galaxy.sve_handler.sigPhi_to_sigR(x)
def plot_sig_Z(self):
points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
points.sort()
radii = [p[0] for p in points]
plt.plot(radii, [self.sig_Z(x) for x in radii], 'x-', label=(r'$\sigma_{Z}^{\alpha=%s}$' % self.sigZ_to_sigR))
def plot_reconstructed_sig_los_mi(self):
points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
points.sort()
radii = [p[0] for p in points]
def zero_or_positive(x):
return 0 if x < 0 else x
def new_sig_mi_2(x):
return self.sig_R(x)**2*(math.sin(self.galaxy.incl*math.pi/180.0)**2 +
(self.sigZ_to_sigR*math.cos(self.galaxy.incl*math.pi/180.0))**2)
new_sig_los_mi = [math.sqrt(zero_or_positive(new_sig_mi_2(x))) for x in radii]
plt.plot(radii, new_sig_los_mi, 'v-', label=(r'$\sigma_{mi}^{\alpha=%s}$' % self.sigZ_to_sigR))
def plot_reconstructed_sig_los_ma(self):
points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
points.sort()
radii = [p[0] for p in points]
def zero_or_positive(x):
return 0 if x < 0 else x
def new_sig_ma_2(x):
return (self.sig_Phi(x)*math.sin(self.galaxy.incl*math.pi/180.0))**2 + \
(self.sig_Z(x)*math.cos(self.galaxy.incl*math.pi/180.0))**2
new_sig_los_ma = [math.sqrt(zero_or_positive(new_sig_ma_2(x))) for x in radii]
plt.plot(radii, new_sig_los_ma, 'v-', label=(r'$\sigma_{ma}^{\alpha=%s}$' % self.sigZ_to_sigR))
|
Amarchuk/2FInstability
|
core/RadialToVerticalRatioHandler.py
|
Python
|
gpl-3.0
| 8,998
|
[
"Galaxy"
] |
64b28a15e43cb96b7df13e8527fbe9ca6f672178f75339b407c01314cedf8df6
|
import numpy
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
from chainer import variable
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _matmul(a, b, xp):
if xp is numpy:
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
return xp.einsum('...jk,...kl->...jl', a, b)
else:
return xp.matmul(a, b)
class SimplifiedDropconnect(function_node.FunctionNode):
"""Linear unit regularized by simplified dropconnect."""
def __init__(self, ratio, mask=None, use_batchwise_mask=True):
self.ratio = ratio
self.mask = mask
self.use_batchwise_mask = use_batchwise_mask
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim >= 2,
w_type.ndim == 2,
type_check.prod(x_type.shape[1:]) == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
if self.mask is not None:
if self.use_batchwise_mask:
type_check.expect(
self.mask.shape[0] == x_type.shape[0],
self.mask.shape[1:] == w_type.shape,
)
else:
type_check.expect(self.mask.shape == w_type.shape)
def forward(self, inputs):
self.retain_inputs((0, 1))
scale = inputs[1].dtype.type(1. / (1 - self.ratio))
xp = cuda.get_array_module(*inputs)
if self.mask is None:
if self.use_batchwise_mask:
mask_shape = (inputs[0].shape[0], inputs[1].shape[0],
inputs[1].shape[1])
else:
mask_shape = (inputs[1].shape[0], inputs[1].shape[1])
if xp == numpy:
self.mask = xp.random.rand(*mask_shape) >= self.ratio
else:
self.mask = xp.random.rand(*mask_shape,
dtype=numpy.float32) >= self.ratio
elif isinstance(self.mask, variable.Variable):
self.mask = self.mask.data
x = _as_mat(inputs[0])
W = inputs[1] * scale * self.mask
# (i)jk,ik->ij
y = _matmul(W, x[:, :, None], xp)
y = y.reshape(y.shape[0], y.shape[1]).astype(x.dtype, copy=False)
if len(inputs) == 3:
b = inputs[2]
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
ret = []
scale = inputs[1].dtype.type(1. / (1 - self.ratio))
x = _as_mat(inputs[0])
W = inputs[1]
if self.use_batchwise_mask:
W = chainer.functions.broadcast_to(
W, self.mask.shape) * scale * self.mask
else:
W = chainer.functions.broadcast_to(
W * scale * self.mask, (x.shape[0],) + self.mask.shape)
gy = grad_outputs[0]
if 0 in indexes:
# ij,(i)jk->ik
gx = chainer.functions.matmul(
gy[:, None, :], W).reshape(inputs[0].shape)
gx = chainer.functions.cast(gx, x.dtype)
ret.append(gx)
if 1 in indexes:
# ij,ik,ijk->jk
gy2 = gy[:, :, None]
x2 = x[:, None, :]
shape = (gy2.shape[0], gy2.shape[1], x2.shape[2])
gy2 = chainer.functions.broadcast_to(gy2, shape)
x2 = chainer.functions.broadcast_to(x2, shape)
gW = chainer.functions.sum(gy2 * x2 * self.mask, axis=0) * scale
gW = chainer.functions.cast(gW, W.dtype)
ret.append(gW)
if 2 in indexes:
gb = chainer.functions.sum(gy, axis=0)
ret.append(gb)
return ret
def simplified_dropconnect(x, W, b=None, ratio=.5, train=True, mask=None,
use_batchwise_mask=True):
"""Linear unit regularized by simplified dropconnect.
Simplified dropconnect drops weight matrix elements randomly with
probability ``ratio`` and scales the remaining elements by factor
``1 / (1 - ratio)``.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
:math:`Y = xW^\\top + b`.
In testing mode, zero will be used as simplified dropconnect ratio instead
of ``ratio``.
Notice:
This implementation cannot be used for reproduction of the paper.
There is a difference between the current implementation and the
original one.
The original version samples from a Gaussian distribution before
applying the activation function, whereas the current implementation
averages before activation.
Args:
x (chainer.Variable or :class:`numpy.ndarray` or cupy.ndarray):
Input variable. Its first dimension ``n`` is assumed
to be the *minibatch dimension*. The other dimensions are treated
as concatenated one dimension whose size must be ``N``.
W (~chainer.Variable): Weight variable of shape ``(M, N)``.
b (~chainer.Variable): Bias variable (optional) of shape ``(M,)``.
ratio (float):
Dropconnect ratio.
train (bool):
If ``True``, executes simplified dropconnect.
Otherwise, simplified dropconnect function works as a linear
function.
mask (None or chainer.Variable or numpy.ndarray or cupy.ndarray):
If ``None``, a randomized dropconnect mask is generated.
Otherwise, the mask must be an ``(n, M, N)`` or ``(M, N)`` shaped
array, and ``use_batchwise_mask`` is ignored.
The given array is used as the dropconnect mask; the main purpose
of this option is debugging.
use_batchwise_mask (bool):
If ``True``, dropped connections depend on each sample in
mini-batch.
Returns:
~chainer.Variable: Output variable.
.. seealso:: :class:`~chainer.links.Dropconnect`
.. seealso::
Wan, L., Zeiler, M., Zhang, S., LeCun, Y., Fergus, R. (2013).
Regularization of Neural Networks using DropConnect.
International Conference on Machine Learning.
`URL <https://cs.nyu.edu/~wanli/dropc/>`_
"""
if not train:
ratio = 0
if b is None:
return SimplifiedDropconnect(
ratio, mask, use_batchwise_mask).apply((x, W))[0]
else:
return SimplifiedDropconnect(
ratio, mask, use_batchwise_mask).apply((x, W, b))[0]
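# --- Editorial usage sketch (not part of the upstream module) ---
# Shapes follow the docstring above: x is (n, N), W is (M, N), b is (M,).
# Chainer functions accept raw ndarrays, so this is runnable as written:
#
# import numpy
# x = numpy.random.rand(8, 16).astype(numpy.float32)
# W = numpy.random.rand(32, 16).astype(numpy.float32)
# b = numpy.zeros(32, dtype=numpy.float32)
# y = simplified_dropconnect(x, W, b, ratio=0.5, train=True)
# assert y.shape == (8, 32)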
|
aonotas/chainer
|
chainer/functions/noise/simplified_dropconnect.py
|
Python
|
mit
| 6,999
|
[
"Gaussian"
] |
ef3b28d2200f7d86fa9cc8b625ddf7de710246bc501ca6afdac3e47bff75c406
|
"""
Dabble, a membrane protein system builder
Author: Robin Betz
Copyright (C) 2019 Robin Betz
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330
Boston, MA 02111-1307, USA.
"""
from __future__ import print_function
import argparse
import os
import shutil
import signal
import sys
import tempfile
from dabble import VmdSilencer, DabbleBuilder, supported_formats
from dabble.param import supported_forcefields, supported_water_models
from pkg_resources import resource_filename
__version__ = '2.7.12'
__author__ = 'Robin Betz'
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Handle interrupts
def signal_handler(*args, **kwargs): # pylint: disable=unused-argument
""" Catch signals """
sys.stdout.write('\nInterrupted\n')
sys.exit(1)
#==============================================================================
class DabbleTempDir(object): # pylint: disable=too-few-public-methods
"""
Creates a destroyable temporary directory, but also allows it not
to be destroyed, or it to be in a custom location.
"""
def __init__(self, retain=False, path=None):
if path is not None:
if not os.path.isdir(path):
os.mkdir(path)
self.dir = path
self.retain = True
else:
self.dir = tempfile.mkdtemp(prefix="dabble", dir=os.getcwd())
self.retain = retain
def __enter__(self):
return self.dir
def __exit__(self, types, value, traceback):
if not self.retain:
shutil.rmtree(self.dir, ignore_errors=True)
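# Editorial sketch (an assumption based on the __enter__/__exit__ protocol
# above, not upstream documentation) of how DabbleTempDir is used:
#
# with DabbleTempDir(retain=False) as tdir:
#     scratch = os.path.join(tdir, "scratch.txt")
#     # ... work inside the temporary directory ...
# # tdir is deleted on exit unless retain=True or a custom path was given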
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main(argv=None):
WELCOME_SCREEN = '''
===============================================
| _ _ _ |
| >(.)__ <(.)__ =(.)__ |
| (___/ (___/ (___/ |
| |
| DABBLE ______ |
| _ _ _ / \\ |
| __(.)< __(.)> __(.)= < beta! | |
| \\___) \\___) \\___) \\_______/ |
| |
| Robin Betz, 2019 |
| Stanford University |
| %s |
===============================================
''' % ('{0:^45}'.format("Version " + __version__))
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(prog='dabble')
group = parser.add_argument_group('Input and Output Files')
group.add_argument('-i', '--input', dest='solute_filename',
metavar='<input>', type=str,
required=True,
help="Path to input protein or ligand file"
)
group.add_argument('-o', '--output', dest='output_filename',
metavar='<output>', type=str,
required=True,
help="Name of output file. If -format argument is "
"supplied, the appropriate extension will be added. If "
"not, format will be inferred by the extension here. "
"Currently supported extensions: .pdb, .mae, .psf, .dms,"
" .prmtop"
)
group.add_argument('--format', dest='format',
metavar='<format>', type=str,
default=None,
choices=supported_formats.keys(),
help="Format of output file. Supported: %s"
% ", ".join(supported_formats.keys())
)
group.add_argument('-O', '--overwrite', dest='overwrite',
action='store_true',
help="Overwrite existing files with requested output",
)
group.add_argument('-M', '--membrane', dest='membrane_system',
type=str, metavar='<solvent>',
default=resource_filename(__name__, "lipid_membranes/popc.mae"),
help="Path to pre-built membrane + solvent block. Must "
"be a .mae file. See documentation to create your own. "
"Defaults to a POPC membrane. Specify 'water' for no "
"membrane."
)
group = parser.add_argument_group('Parameterization Options')
group.add_argument('-ff', '--forcefield', dest='forcefield',
type=str, metavar='<forcefield>',
default="charmm",
choices=supported_forcefields.keys(),
required=True, action="store",
help="Force field to use for parameterization. Currently"
" supported values: %s"
% ", ".join(supported_forcefields.keys())
)
group.add_argument('--water', dest='water_model',
type=str, metavar='<water model>',
default='tip3',
choices=supported_water_models.keys(),
required=False, action="store",
help="Water model to use for paramterization. Currently"
" supported values: %s"
% ", ".join(supported_water_models.keys())
)
group.add_argument('--hmr', dest='hmassrepartition',
default=False,
action='store_true',
help="Repartition Hydrogen masses to allow up to 4fs "
"time steps. Currently supported for AMBER (.prmtop) "
"output format only"
)
group.add_argument('-top', '--topology', dest='extra_topos',
type=str, metavar='<topology file>',
default=None,
action='append',
help="Additional topology (rtf, off, lib, leaprc) file "
"to include in parameterization"
)
group.add_argument('-par', '--parameters', dest='extra_params',
type=str, metavar='<parameter file>',
default=None,
action='append',
help="Additional parameter (prm, lib, frcmod) file to "
"include in parameterization"
)
group = parser.add_argument_group('Lipid Membrane Options')
group.add_argument('-L', '--lipid-selection', dest='lipid_sel',
type=str,
default='lipid or resname POPS POPG',
help="Atom selection string (VMD syntax) for the lipids "
"in the membrane. Defaults to 'lipid or resname POPS'"
)
group.add_argument('-C', '--lipid-clash-check', dest='clash_lipids',
type=str,
default='resname CLR CLOL',
help="Atom selection string (VMD syntax) for lipids or "
"other integral membrane molecules with rings (i.e. "
"cholesterol) that might clash with other lipids. "
"Defaults to 'resname CLR CLOL'",
)
group.add_argument('-f', '--lipid-friendly-sel', dest='lipid_friendly_sel',
type=str,
help="Atom selection string (VMD syntax) for parts of "
"the protein that are 'lipid-friendly' and should not be"
"considered when calculating which lipids are clashing "
"with the protein (i.e.: lipid tails, palmitoylations). "
"Defaults to no selection."
)
group = parser.add_argument_group('Ion Options')
group.add_argument('--cation',
default='Na', type=str,
help='Specify element of cation. Defaults to "Na"'
)
group.add_argument('--anion',
default='Cl', type=str,
help='Specify element of anion. Defaults to "Cl"'
)
group.add_argument('-s', '--salt-concentration', dest='salt_conc',
type=float,
default=0.150,
help="Salt concentration in final system, in M. Defaults"
" to physiological 0.150M NaCl"
)
group = parser.add_argument_group('System Size Options')
z_buffer_opts = group.add_mutually_exclusive_group()
z_buffer_opts.add_argument('-w', '--water-buffer', dest='wat_buffer',
type=float,
default=20.0,
help="Buffer, in A, from each side of the "
"protein/solute to the edge of the periodic box."
" Defaults to a conservative 20.0 A"
)
group.add_argument('-m', '--membrane-buffer-dist', dest='xy_buf', default=17.5,
type=float, help='Membrane buffer distance from the protein to the '
'box edge in the XY plane. '
'[default: 17.5 angstroms]')
group.add_argument('-d', '--lipid-dist', dest='lipid_dist',
default=1.75, type=float,
help='Minimum distance from solute to lipid acyl group '
'[default: 1.75]')
group.add_argument('--absolute-x', type=float, default=None,
dest='user_x', help='Specifies the x dimension. Takes '
'precedence over buffer-based calculation.')
group.add_argument('--absolute-y', type=float, default=None,
dest='user_y', help='Specifies the y dimension. Takes '
'precedence over buffer-based calculation.')
group.add_argument('--absolute-z', type=float, default=None,
dest='user_z', help='Specifies the z dimension. Takes '
'precedence over buffer-based calculation.')
group = parser.add_argument_group('Orientation Options',
'These options control how the input solute '
'is oriented before inserting it into the '
'solvent. Although it is recommended you '
'pre-align the solute, these options are '
'here for your convenience.')
group.add_argument('--opm-pdb', dest='opm_pdb',
default=None, type=str,
help='Oriented PDB file from OPM to align the protein to '
'[default: None]')
group.add_argument('--opm-align', dest='opm_align',
default='protein and backbone', type=str,
help='atomsel for OPM backbone atoms to align to '
'[default: protein and backbone]')
group.add_argument('--move-solute', dest='z_move',
default=None, type=float,
help='value added to solute z coordinates '
'[default: 0]')
group.add_argument('--membrane-rotation', dest='z_rotation',
default=None, type=float,
help='Membrane rotation relative to Z axis of protein, in '
'degrees. Use the number from OPM if you have it. '
'[default: 0]')
group = parser.add_argument_group('Debug and Testing Options')
group.add_argument('--tmp-dir', dest='tmp_dir', default=None)
group.add_argument('--verbose', dest='debug_verbose', default=False,
action='store_true')
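# Editorial example invocation (hypothetical file names, not from upstream
# docs), using the options defined above:
#
# dabble -i protein.pdb -o system.psf -ff charmm -w 20.0 -s 0.150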
print(WELCOME_SCREEN)
print("\nCommand was:\n %s\n" % " ".join([i for i in sys.argv]))
opts = parser.parse_args(argv)
# Make the temporary directory. Needs to be done now so there is somewhere
# to save the vmd output
with DabbleTempDir(path=opts.tmp_dir, retain=opts.debug_verbose) as tdir:
opts.tmp_dir = tdir # Needs to be defined in opts for builder to work
soutput = sys.stdout if opts.debug_verbose else os.path.join(tdir,
"vmd_output.txt")
with VmdSilencer(output=soutput):
signal.signal(signal.SIGINT, signal_handler)
builder = DabbleBuilder(**vars(opts))
builder.write()
sys.stdout.flush()
print("\nSuccess!")
if __name__ == "__main__":
main()
|
Eigenstate/dabble
|
dabble/__main__.py
|
Python
|
gpl-2.0
| 13,843
|
[
"Amber",
"CHARMM",
"VMD"
] |
dcceebddf5003aa5bbbb42df7d9af7cb0b5e5f71b463e3e5c45f44f2297dbacb
|
# -*- coding: utf-8 -*-
"""Testing functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from contextlib import contextmanager
from distutils.version import LooseVersion
from functools import partial, wraps
import os
import inspect
from io import StringIO
from shutil import rmtree
import sys
import tempfile
import traceback
from unittest import SkipTest
import warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from scipy import linalg
from ._logging import warn, ClosingStringIO
from .numerics import object_diff
def nottest(f):
"""Mark a function as not a test (decorator)."""
f.__test__ = False
return f
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level cannot
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative would be to use the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
rmtree(self._path, ignore_errors=True)
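# Editorial usage sketch (assumption based on the class above): since
# _TempDir subclasses str, the instance itself is the directory path.
#
# def test_something():
#     tempdir = _TempDir()
#     fname = os.path.join(tempdir, 'output.fif')
#     # the directory is removed once tempdir is garbage collected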
def requires_nibabel():
"""Wrap to requires_module with a function call (fewer lines to change)."""
return partial(requires_module, name='nibabel')
def requires_dipy():
"""Check for dipy."""
import pytest
# For some strange reason, on CIs we can get a weird
#
# ImportError: dlopen: cannot load any more object with static TLS
#
# so let's import everything in the decorator.
try:
from dipy.align import imaffine, imwarp, metrics, transforms # noqa, analysis:ignore
from dipy.align.reslice import reslice # noqa, analysis:ignore
from dipy.align.imaffine import AffineMap # noqa, analysis:ignore
from dipy.align.imwarp import DiffeomorphicMap # noqa, analysis:ignore
except Exception:
have = False
else:
have = True
return pytest.mark.skipif(not have, reason='Requires dipy >= 0.10.1')
def requires_version(library, min_version='0.0'):
"""Check for a library version."""
import pytest
return pytest.mark.skipif(not check_version(library, min_version),
reason=('Requires %s version >= %s'
% (library, min_version)))
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ('import %s' % name) if call is None else call
reason = 'Test %s skipped, requires %s.' % (function.__name__, name)
try:
exec(call, globals(), locals())
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != 'No module named %s' % name:
reason += ' Got exception (%s)' % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_mayavi_call = """
with warnings.catch_warnings(record=True): # traits
from mayavi import mlab
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_pylsl = partial(requires_module, name='pylsl')
requires_sklearn = partial(requires_module, name='sklearn')
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
def requires_freesurfer(arg):
"""Require Freesurfer."""
if isinstance(arg, str):
# Calling as @requires_freesurfer('progname'): return decorator
# after checking for progname existence
call = """
from . import run_subprocess
run_subprocess([%r, '--version'])
""" % (arg,)
return partial(
requires_module, name='Freesurfer (%s)' % (arg,), call=call)
else:
# Calling directly as @requires_freesurfer: return decorated function
# and just check env var existence
return requires_module(arg, name='Freesurfer', call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_vtk = partial(requires_module, name='vtk')
requires_pysurfer = partial(requires_module, name='PySurfer',
call="""import warnings
with warnings.catch_warnings(record=True):
from surfer import Brain""")
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime')
requires_h5py = partial(requires_module, name='h5py')
def requires_numpydoc(func):
"""Decorate tests that need numpydoc."""
return requires_version('numpydoc', '1.0')(func) # validate needs 1.0
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = LooseVersion(
getattr(library, '__version__', '0.0').lstrip('v'))
if this_version < min_version:
ok = False
return ok
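# Editorial example (library name and version are arbitrary): check_version
# returns a bool, so it can gate optional code paths directly.
#
# if check_version('numpy', '1.16'):
#     pass  # use the newer API
# else:
#     pass  # fall back to the older one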
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
def _import_mlab():
"""Quietly import mlab."""
with warnings.catch_warnings(record=True):
from mayavi import mlab
return mlab
@contextmanager
def traits_test_context():
"""Context to raise errors in trait handlers."""
from traits.api import push_exception_handler
push_exception_handler(reraise_exceptions=True)
try:
yield
finally:
push_exception_handler(reraise_exceptions=False)
def traits_test(test_func):
"""Raise errors in trait handlers (decorator)."""
@wraps(test_func)
def dec(*args, **kwargs):
with traits_test_context():
return test_func(*args, **kwargs)
return dec
@nottest
def run_tests_if_main():
"""Run tests in a given file if it is run as a script."""
local_vars = inspect.currentframe().f_back.f_locals
if local_vars.get('__name__', '') != '__main__':
return
import pytest
code = pytest.main([local_vars['__file__'], '-v'])
if code:
raise AssertionError('pytest finished with errors (%d)' % (code,))
def run_command_if_main():
"""Run a given command if it's __main__."""
local_vars = inspect.currentframe().f_back.f_locals
if local_vars.get('__name__', '') == '__main__':
local_vars['run']()
class ArgvSetter(object):
"""Temporarily set sys.argv."""
def __init__(self, args=(), disable_stdout=True,
disable_stderr=True): # noqa: D102
self.argv = list(('python',) + args)
self.stdout = ClosingStringIO() if disable_stdout else sys.stdout
self.stderr = ClosingStringIO() if disable_stderr else sys.stderr
def __enter__(self): # noqa: D105
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args): # noqa: D105
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
class SilenceStdout(object):
"""Silence stdout."""
def __init__(self, close=True):
self.close = close
def __enter__(self): # noqa: D105
self.stdout = sys.stdout
sys.stdout = StringIO()
return sys.stdout
def __exit__(self, *args): # noqa: D105
if self.close:
sys.stdout.close()
sys.stdout = self.stdout
def has_nibabel():
"""Determine if nibabel is installed.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel # noqa
except ImportError:
return False
else:
return True
def has_mne_c():
"""Check for MNE-C."""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Check for Freesurfer."""
return 'FREESURFER_HOME' in os.environ
def buggy_mkl_svd(function):
"""Decorate tests that make calls to SVD and intermittently fail."""
@wraps(function)
def dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except np.linalg.LinAlgError as exp:
if 'SVD did not converge' in str(exp):
msg = 'Intel MKL SVD convergence error detected, skipping test'
warn(msg)
raise SkipTest(msg)
raise
return dec
def assert_and_remove_boundary_annot(annotations, n=1):
"""Assert that there are boundary annotations and remove them."""
from ..io.base import BaseRaw
if isinstance(annotations, BaseRaw): # allow either input
annotations = annotations.annotations
for key in ('EDGE', 'BAD'):
idx = np.where(annotations.description == '%s boundary' % key)[0]
assert len(idx) == n
annotations.delete(idx)
def assert_object_equal(a, b):
"""Assert two objects are equal."""
d = object_diff(a, b)
assert d == '', d
def _raw_annot(meas_date, orig_time):
from .. import Annotations, create_info
from ..annotations import _handle_meas_date
from ..io import RawArray
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
if meas_date is not None:
meas_date = _handle_meas_date(meas_date)
raw.info['meas_date'] = meas_date
raw.info._check_consistency()
annot = Annotations([.5], [.2], ['dummy'], orig_time)
raw.set_annotations(annotations=annot)
return raw
def _get_data(x, ch_idx):
"""Get the (n_ch, n_times) data array."""
from ..evoked import Evoked
from ..io import BaseRaw
if isinstance(x, BaseRaw):
return x[ch_idx][0]
elif isinstance(x, Evoked):
return x.data[ch_idx]
def _check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG'):
"""Check the SNR of a set of channels."""
actual_data = _get_data(actual, picks)
desired_data = _get_data(desired, picks)
bench_rms = np.sqrt(np.mean(desired_data * desired_data, axis=1))
error = actual_data - desired_data
error_rms = np.sqrt(np.mean(error * error, axis=1))
np.clip(error_rms, 1e-60, np.inf, out=error_rms) # avoid division by zero
snrs = bench_rms / error_rms
# min tol
snr = snrs.min()
bad_count = (snrs < min_tol).sum()
msg = ' (%s)' % msg if msg != '' else msg
assert bad_count == 0, ('SNR (worst %0.2f) < %0.2f for %s/%s '
'channels%s' % (snr, min_tol, bad_count,
len(picks), msg))
# median tol
snr = np.median(snrs)
assert snr >= med_tol, ('%s SNR median %0.2f < %0.2f%s'
% (kind, snr, med_tol, msg))
def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500.,
msg=None):
"""Assert channel SNR of a certain level.
Mostly useful for operations like Maxwell filtering that modify
MEG channels while leaving EEG and others intact.
"""
from ..io.pick import pick_types
picks = pick_types(actual.info, meg=True, exclude=[])
picks_desired = pick_types(desired.info, meg=True, exclude=[])
assert_array_equal(picks, picks_desired, err_msg='MEG pick mismatch')
chpis = pick_types(actual.info, meg=False, chpi=True, exclude=[])
chpis_desired = pick_types(desired.info, meg=False, chpi=True, exclude=[])
if chpi_med_tol is not None:
assert_array_equal(chpis, chpis_desired, err_msg='cHPI pick mismatch')
others = np.setdiff1d(np.arange(len(actual.ch_names)),
np.concatenate([picks, chpis]))
others_desired = np.setdiff1d(np.arange(len(desired.ch_names)),
np.concatenate([picks_desired,
chpis_desired]))
assert_array_equal(others, others_desired, err_msg='Other pick mismatch')
if len(others) > 0: # if non-MEG channels present
assert_allclose(_get_data(actual, others),
_get_data(desired, others), atol=1e-11, rtol=1e-5,
err_msg='non-MEG channel mismatch')
_check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG')
if chpi_med_tol is not None and len(chpis) > 0:
_check_snr(actual, desired, chpis, 0., chpi_med_tol, msg, kind='cHPI')
def assert_snr(actual, desired, tol):
"""Assert actual and desired arrays are within some SNR tolerance."""
with np.errstate(divide='ignore'): # allow infinite
snr = (linalg.norm(desired, ord='fro') /
linalg.norm(desired - actual, ord='fro'))
assert snr >= tol, '%f < %f' % (snr, tol)
def assert_stcs_equal(stc1, stc2):
"""Check that two STC are equal."""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_array_equal(stc1.vertices[0], stc2.vertices[0])
assert_array_equal(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
def _dig_sort_key(dig):
"""Sort dig keys."""
return (dig['kind'], dig['ident'])
def assert_dig_allclose(info_py, info_bin, limit=None):
"""Assert dig allclose."""
from ..bem import fit_sphere_to_headshape
from ..io.constants import FIFF
# test dig positions
dig_py = sorted(info_py['dig'], key=_dig_sort_key)
dig_bin = sorted(info_bin['dig'], key=_dig_sort_key)
assert len(dig_py) == len(dig_bin)
for ii, (d_py, d_bin) in enumerate(zip(dig_py[:limit], dig_bin[:limit])):
for key in ('ident', 'kind', 'coord_frame'):
assert d_py[key] == d_bin[key]
assert_allclose(d_py['r'], d_bin['r'], rtol=1e-5, atol=1e-5,
err_msg='Failure on %s:\n%s\n%s'
% (ii, d_py['r'], d_bin['r']))
if any(d['kind'] == FIFF.FIFFV_POINT_EXTRA for d in dig_py):
r_bin, o_head_bin, o_dev_bin = fit_sphere_to_headshape(
info_bin, units='m', verbose='error')
r_py, o_head_py, o_dev_py = fit_sphere_to_headshape(
info_py, units='m', verbose='error')
assert_allclose(r_py, r_bin, atol=1e-6)
assert_allclose(o_dev_py, o_dev_bin, rtol=1e-5, atol=1e-6)
assert_allclose(o_head_py, o_head_bin, rtol=1e-5, atol=1e-6)
@contextmanager
def modified_env(**d):
"""Use a modified os.environ with temporarily replaced key/value pairs.
Parameters
----------
**kwargs : dict
The key/value pairs of environment variables to replace.
"""
orig_env = dict()
for key, val in d.items():
orig_env[key] = os.getenv(key)
if val is not None:
assert isinstance(val, str)
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
try:
yield
finally:
for key, val in orig_env.items():
if val is not None:
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
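# Editorial usage sketch for modified_env (key names are hypothetical):
# replaced values are restored, and added keys removed, on exit.
#
# with modified_env(MNE_SKIP_NETWORK_TESTS='1', FREESURFER_HOME=None):
#     assert os.getenv('MNE_SKIP_NETWORK_TESTS') == '1'
#     assert 'FREESURFER_HOME' not in os.environ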
|
Teekuningas/mne-python
|
mne/utils/_testing.py
|
Python
|
bsd-3-clause
| 17,015
|
[
"Mayavi",
"VTK"
] |
50dad377214cfd3d024b84bf9703d3c0efedb64280566acda4eed2a672a1afd9
|
""" Defines a series of scripts for running server and maintenance
FLASK_APP=manage.py flask --help
(bogus comment line added to trigger build)
"""
import copy
from datetime import datetime
import json
import os
import sys
import alembic.config
import click
from flask import url_for
from flask_migrate import Migrate
import redis
import requests
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from portal.audit import auditable_event
from portal.date_tools import FHIR_datetime
from portal.config.site_persistence import SitePersistence
from portal.extensions import db, user_manager
from portal.factories.app import create_app
from portal.models.clinical_constants import add_static_concepts
from portal.models.i18n_utils import (
build_pot_files,
compile_pos,
download_all_translations,
smartling_download,
smartling_upload,
)
from portal.models.intervention import add_static_interventions
from portal.models.organization import add_static_organization
from portal.models.qb_timeline import (
QBT,
invalidate_users_QBT,
update_users_QBT,
)
from portal.models.questionnaire_bank import (
QuestionnaireBank,
add_static_questionnaire_bank,
)
from portal.models.questionnaire_response import QuestionnaireResponse
from portal.models.relationship import add_static_relationships
from portal.models.research_study import (
BASE_RS_ID,
add_static_research_studies,
research_study_id_from_questionnaire,
)
from portal.models.role import ROLE, Role, add_static_roles
from portal.models.url_token import (
BadSignature,
SignatureExpired,
verify_token,
)
from portal.models.user import (
User,
flag_test,
permanently_delete_user,
suppress_email,
validate_email,
)
from portal.tasks import celery_beat_health_check
app = create_app()
MIGRATIONS_DIR = os.path.join(app.root_path, 'migrations')
migrate = Migrate(app, db, directory=MIGRATIONS_DIR)
def _run_alembic_command(args):
"""Helper to manage working directory and run given alembic commands"""
# Alembic looks for the alembic.ini file in CWD
# hop over there and then return to CWD
cwd = os.getcwd()
os.chdir(MIGRATIONS_DIR)
alembic.config.main(argv=args)
os.chdir(cwd) # restore cwd
def stamp_db():
"""Run the alembic command to stamp the db with the current head"""
# if the alembic_version table exists, this db has been stamped,
# don't update to head, as it would potentially skip steps.
if db.engine.dialect.has_table(db.engine.connect(), 'alembic_version'):
return
_run_alembic_command(['--raiseerr', 'stamp', 'head'])
def upgrade_db():
"""Run any outstanding migration scripts"""
_run_alembic_command(['--raiseerr', 'upgrade', 'head'])
def flush_cache():
"""Flush redis of all values.
Cached values may no longer correspond with new DB entries.
NB this may incur a significant performance hit as all cached
entries will be invalidated.
"""
if app.config.get('FLUSH_CACHE_ON_SYNC'):
r = redis.from_url(app.config['REDIS_URL'])
r.flushdb()
@app.cli.command()
def last_usage():
"""Returns number of seconds since last usage was recorded
NB in the event of no recorded usage, such as after a redis flush,
a value of -1 will be returned
"""
from portal.usage_monitor import last_usage
seconds_old = last_usage() or -1
click.echo(seconds_old)
@app.cli.command()
def sync():
"""Synchronize database with latest schema and persistence data.
Idempotent function takes necessary steps to build tables, migrate
schema and run `seed`. Safe to run on existing or brand new databases.
To re/create the database, [delete and] create within the DBMS itself,
then invoke this function.
"""
if not db.engine.dialect.has_table(db.engine.connect(), 'alembic_version'):
db.create_all()
stamp_db()
flush_cache()
upgrade_db()
seed()
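# Editorial note: per the module docstring, commands defined here run through
# the Flask CLI, e.g. (shell):
#
# FLASK_APP=manage.py flask sync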
@click.option(
'--keep_unmentioned', '-k', default=False,
help='Keep orgs and interventions not mentioned in persistence file')
@app.cli.command(name="seed")
def seed_command(keep_unmentioned):
"""Seed database with required data"""
seed(keep_unmentioned)
def seed(keep_unmentioned=False):
"""Actual seed function
NB this is defined separately so it can also be called internally,
i.e. from sync
"""
# Request context necessary for generating data from own HTTP APIs
with app.test_request_context():
add_static_concepts()
add_static_interventions()
add_static_organization()
add_static_questionnaire_bank()
add_static_relationships()
add_static_roles()
add_static_research_studies()
db.session.commit()
# import site export file if found
SitePersistence(target_dir=None).import_(
keep_unmentioned=keep_unmentioned)
@click.option('--directory', '-d', default=None, help="Export directory")
@click.option(
'--staging_exclusion', '-x', default=False, is_flag=True,
help="Staging Exclusions Export")
@app.cli.command()
def export_site(directory, staging_exclusion):
"""Generate JSON file containing dynamic site config
:param directory: used to name a non-default target directory for export
files
:param staging_exclusion: set only if persisting exclusions to retain
on staging when pulling over production data.
Portions of site configuration live in the database, such as
Organizations and Access Strategies. Generate a single export
file for migration of this data to other instances of the service.
NB the seed command imports the data file if found, along with
other static data.
"""
if staging_exclusion and not directory:
directory = app.config.get("PERSISTENCE_EXCLUSIONS_DIR")
SitePersistence(target_dir=directory).export(
staging_exclusion=staging_exclusion)
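# Editorial example invocation (hypothetical directory name):
#
# FLASK_APP=manage.py flask export-site -d /tmp/site_export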
@click.option('--directory', '-d', default=None, help="Import directory")
@app.cli.command()
def import_site_exclusions(directory):
"""Import serialized exclusions (saved on stage prior to prod db overwrite)
:param directory: used to name a non-default target directory for import
files
"""
if not directory:
directory = app.config.get("PERSISTENCE_EXCLUSIONS_DIR")
SitePersistence(target_dir=directory).import_(
staging_exclusion=True, keep_unmentioned=True)
@click.option('--email', '-e', help="email address for new user")
@click.option('--role', '-r', help="Comma separated role(s) for new user")
@click.option('--password', '-p', help="password for new user")
@app.cli.command()
def add_user(email, role, password):
"""Add new user as specified """
validate_email(email)
if not password or len(str(password)) < 5:
raise ValueError("requires a password")
pw = user_manager.hash_password(password)
user = User(email=email, password=pw)
db.session.add(user)
roles = role.split(',') if role else []
try:
role_list = [
Role.query.filter_by(name=r).one() for r in roles]
user.update_roles(role_list, acting_user=user)
except NoResultFound:
raise ValueError(
"one or more roles ill defined {}".format(roles))
db.session.commit()
auditable_event(
"new account generated (via cli) for {}".format(user),
user_id=user.id, subject_id=user.id, context='account')
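# Editorial example invocation (hypothetical values):
#
# FLASK_APP=manage.py flask add-user -e new.user@example.org -r admin -p s3cretpw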
@click.option('--email', '-e', help="target user email for password reset")
@click.option('--password', '-p', help="new password")
@click.option(
'--actor', '-a',
help='Email of user taking this action (must be admin)'
)
@app.cli.command()
def password_reset(email, password, actor):
"""Reset given user's password """
try:
acting_user = User.query.filter(
func.lower(User.email) == actor.lower()).one()
except NoResultFound:
raise ValueError("email for acting user <{}> not found".format(actor))
try:
target_user = User.query.filter(
func.lower(User.email) == email.lower()).one()
except NoResultFound:
raise ValueError("email for target user not found")
if not acting_user.has_role(ROLE.ADMIN.value):
raise ValueError("Actor must be an admin")
if not password or len(str(password)) < 8:
raise ValueError("requires a valid password")
target_user.password = user_manager.hash_password(password)
db.session.commit()
auditable_event(
"cli password reset for {}".format(target_user),
user_id=acting_user.id, subject_id=target_user.id, context='account')
@click.option('--email', '-e', help='Email of user to purge.')
@click.option(
'--actor', '-a',
help='Email of user to act as.',
prompt=(
"\n\nWARNING!!!\n\n"
" This will permanently delete the target user and all their related"
" data.\n"
" If you want to continue,"
" enter a valid user email as the acting party for our records")
)
@app.cli.command()
def purge_user(email, actor):
"""Purge the given user from the system"""
permanently_delete_user(email, actor=actor)
@click.argument('token')
@app.cli.command()
def token_details(token):
valid_seconds = app.config.get(
'TOKEN_LIFE_IN_DAYS') * 24 * 3600
try:
user_id = verify_token(token, valid_seconds)
except SignatureExpired:
click.echo("EXPIRED token (older than {} seconds)".format(
valid_seconds))
except BadSignature:
click.echo("INVALID token")
else:
click.echo("Valid token for user_id {}".format(user_id))
@app.cli.command()
def mark_test():
"""Designate all current users as test users"""
flag_test()
@app.cli.command()
def compile_po_files():
"""Compile PO files to MO files"""
compile_pos()
click.echo("Compiled backend PO files to MO files")
@app.cli.command()
def translation_upload():
"""Update .pot file on Smartling
Creates a new .pot file, updates the file with relevant DB entries, then
POSTs said .pot file to Smartling via their API
"""
smartling_upload()
@app.cli.command()
def extract_i18n():
"""Update .pot file on Smartling
Creates a new .pot file, updates the file with relevant DB entries
"""
build_pot_files()
@click.option('--language', '-l', help='language code (e.g. en_US).')
@click.option('--state', '-s', help='Translation state', type=click.Choice([
'pseudo',
'pending',
'published',
'contextMatchingInstrumented',
]))
@app.cli.command()
def translation_download(language, state):
"""Download .po file(s) from Smartling
GETs the .po file for the specified language from Smartling via their API.
If no language is specified, all available translations will be downloaded.
After download, .po file(s) are compiled into .mo file(s) using pybabel
"""
default_state = 'pending'
if app.config['SYSTEM_TYPE'].lower() == 'production':
default_state = 'published'
state = state or default_state
click.echo(
'Downloading {state} translations from Smartling'.format(state=state))
smartling_download(state=state, language=language)
@click.option('--state', '-s', help='Translation state', type=click.Choice([
'pseudo',
'pending',
'published',
'contextMatchingInstrumented',
]))
@app.cli.command()
def download_translations(state):
default_state = 'pending'
if app.config['SYSTEM_TYPE'].lower() == 'production':
default_state = 'published'
state = state or default_state
click.echo(
'Downloading {state} translations from every Smartling project'.format(state=state)
)
download_all_translations(state=state)
@click.option(
'--config_key',
'-c',
help='Return a single config value, or empty string if value is None'
)
@app.cli.command()
def config(config_key):
"""List current flask configuration values in JSON"""
if config_key:
# Remap None values to an empty string
print(app.config.get(config_key, '') or '')
return
print(json.dumps(
# Skip un-serializable values
{k: v for k, v in app.config.items() if isinstance(v, str)},
indent=2,
))
@app.cli.command()
def set_celery_beat_healthy():
return celery_beat_health_check()
@app.cli.command()
def healthcheck():
"""Calls the healthcheck API"""
result = requests.get(
url_for('check')
)
print(json.dumps(result.json(), indent=4))
# Return success (0) if passing status code
if result.ok:
return sys.exit()
# Healthcheck failed. Return a failing status code
return sys.exit(result.status_code)
@click.option('--email', '-e', help='Email address wanting no communication')
@click.option(
'--actor', '-a',
help='email address of user taking this action, for audit trail'
)
@app.cli.command()
def no_email(email, actor):
"""Suppress all future emails for user (beyond p/w reset)"""
suppress_email(email, actor)
@click.option('--qnr_id', help="Questionnaire Response ID", required=True)
@click.option(
'--authored',
required=True,
help="new datetime for qnr authored, format example: 2019-04-09 15:14:43")
@click.option(
'--actor',
required=True,
help='email address of user taking this action, for audit trail'
)
@app.cli.command()
def update_qnr_authored(qnr_id, authored, actor):
"""Modify authored date on given Questionnaire Response ID"""
try:
acting_user = User.query.filter(
func.lower(User.email) == actor.lower()).one()
except NoResultFound:
raise ValueError("email for acting user <{}> not found".format(actor))
qnr = QuestionnaireResponse.query.get(qnr_id)
if not qnr:
raise ValueError(
"Questionnaire Response {qnr_id} not found".format(qnr_id=qnr_id))
acting_user.check_role(permission='edit', other_id=qnr.subject_id)
document = copy.deepcopy(qnr.document)
new_authored = FHIR_datetime.parse(authored)
old_authored = FHIR_datetime.parse(document['authored'])
document['authored'] = datetime.strftime(new_authored, "%Y-%m-%dT%H:%M:%SZ")
qnr.document = document
# Determine research study if qb_id is currently set, default to 0
rs_id = 0
if qnr.questionnaire_bank_id:
qb = QuestionnaireBank.query.get(qnr.questionnaire_bank_id)
rs_id = research_study_id_from_questionnaire(
qb.questionnaires[0].name)
# Must clear the qb_id and iteration in case this authored date
# change moves the QNR to a different visit.
qnr.questionnaire_bank_id = None
qnr.qb_iteration = None
db.session.commit()
# Invalidate timeline as this probably altered the status
invalidate_users_QBT(qnr.subject_id, research_study_id=rs_id)
message = (
"Updated QNR {qnr_id} authored from {old_authored} to "
"{new_authored}".format(
qnr_id=qnr_id, old_authored=old_authored,
new_authored=new_authored))
auditable_event(
message=message,
context="assessment",
user_id=acting_user.id,
subject_id=qnr.subject_id)
print(message)
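# Editorial example invocation (hypothetical IDs; the authored format follows
# the option help above):
#
# FLASK_APP=manage.py flask update-qnr-authored --qnr_id 123 \
#     --authored "2019-04-09 15:14:43" --actor admin@example.org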
@click.option('--src_id', type=int, help="Source Patient ID (WILL BE DELETED!)")
@click.option('--tgt_id', type=int, help="Target Patient ID")
@click.option(
'--actor',
help='email address of user taking this action, for audit trail'
)
@app.cli.command()
def merge_users(src_id, tgt_id, actor):
"""Copy useful portion of source to target user and delete source"""
from flask import current_app
from portal.models.audit import Audit
from portal.models.user import internal_identifier_systems
from portal.models.tou import ToU
try:
acting_user = User.query.filter(
func.lower(User.email) == actor.lower()).one()
except NoResultFound:
raise ValueError("email for acting user <{}> not found".format(actor))
acting_user.check_role(permission='edit', other_id=src_id)
acting_user.check_role(permission='edit', other_id=tgt_id)
src_user = User.query.get(src_id)
tgt_user = User.query.get(tgt_id)
if not all((src_user, tgt_user)) or (
src_user.birthdate != tgt_user.birthdate):
raise ValueError("Birth dates don't match; can't continue")
if src_user.auth_providers.count() > 0:
raise ValueError("extend to include auth_providers")
if src_user.identifiers != tgt_user.identifiers and (
click.confirm("Add identifiers \n\t{} \nto \n\t{}".format(
"\n\t".join((str(i) for i in src_user.identifiers if
i.system not in internal_identifier_systems)),
"\n\t".join((str(i) for i in tgt_user.identifiers if
i.system not in internal_identifier_systems))))):
tgt_user.merge_others_relationship(src_user, '_identifiers')
if src_user.organizations != tgt_user.organizations and (
click.confirm("Add organizations \n\t{} \nto \n\t{}".format(
"\n\t".join((str(i) for i in src_user.organizations)),
"\n\t".join((str(i) for i in tgt_user.organizations))))):
tgt_user.merge_others_relationship(src_user, 'organizations')
if src_user.roles != tgt_user.roles:
only_on_tgt = [r for r in tgt_user.roles if r not in src_user.roles]
if all(
i.name in current_app.config['PRE_REGISTERED_ROLES']
for i in only_on_tgt):
if click.confirm(
"Remove role(s) `{}` only found on target user".format(
",".join((j.name for j in only_on_tgt)))):
tgt_user.remove_pre_registered_roles()
else:
raise ValueError("mismatch on roles beyond pre-registered")
if src_user.valid_consents != tgt_user.valid_consents and (
click.confirm("Add consents \n\t{} \nto \n\t{}".format(
"\n\t".join((str(i) for i in src_user.valid_consents)),
"\n\t".join((str(i) for i in tgt_user.valid_consents))))):
tgt_user.merge_others_relationship(src_user, '_consents')
src_tous = ToU.query.join(Audit).filter(Audit.subject_id == src_user.id)
tgt_tous = ToU.query.join(Audit).filter(Audit.subject_id == tgt_user.id)
if src_tous.count() and (
click.confirm("Add ToUs \n\t{} \nto \n\t{}".format(
"\n\t".join((str(i) for i in src_tous)),
"\n\t".join((str(i) for i in tgt_tous))))):
for tou in src_tous:
tou.audit.subject_id = tgt_user.id
if src_user.questionnaire_responses.count() and (
click.confirm(
"Add questionnaire_responses \n\t{} \nto \n\t{}".format(
"\n\t".join(
(str(i) for i in src_user.questionnaire_responses)),
"\n\t".join(
(str(i) for i in tgt_user.questionnaire_responses))
))):
tgt_user.merge_others_relationship(src_user, 'questionnaire_responses')
invalidate_users_QBT(tgt_user.id, research_study_id=0)
src_email = src_user.email # capture, as it changes on delete
replace_email = False
if click.confirm("Replace email {} with {}?".format(
tgt_user.email, src_email)):
# must wait till delete_user masks existing
replace_email = True
if click.confirm("Replace first name {} with {}".format(
tgt_user.first_name, src_user.first_name)):
tgt_user.first_name = src_user.first_name
if click.confirm("Replace last name {} with {}".format(
tgt_user.last_name, src_user.last_name)):
tgt_user.last_name = src_user.last_name
if click.confirm("Replace password {} with {}".format(
tgt_user.password, src_user.password)):
tgt_user.password = src_user.password
src_user.delete_user(acting_user=acting_user)
if replace_email:
tgt_user.email = src_email
message = "Merged user {} into {} ".format(src_id, tgt_id)
auditable_event(
message=message,
context="account",
user_id=acting_user.id,
subject_id=tgt_id)
print(message)
def capture_patient_state(patient_id):
"""Call to capture QBT and QNR state for patient, used for before/after"""
qnrs = QuestionnaireResponse.qnr_state(patient_id)
tl = QBT.timeline_state(patient_id)
return {'qnrs': qnrs, 'timeline': tl}
def present_before_after_state(user_id, external_study_id, before_state):
from portal.dict_tools import dict_compare
after_qnrs = QuestionnaireResponse.qnr_state(user_id)
after_timeline = QBT.timeline_state(user_id)
# Compare results
added_q, removed_q, modified_q, same = dict_compare(
after_qnrs, before_state['qnrs'])
assert not added_q
assert not removed_q
added_t, removed_t, modified_t, same = dict_compare(
after_timeline, before_state['timeline'])
if any((added_t, removed_t, modified_t, modified_q)):
print(f"\nPatient {user_id} ({external_study_id}):")
if modified_q:
print("\tModified QNRs (old, new)")
for mod in sorted(modified_q):
print(f"\t\t{mod} {modified_q[mod][1]} ==>"
f" {modified_q[mod][0]}")
if added_t:
print("\tAdditional timeline rows:")
for item in sorted(added_t):
print(f"\t\t{item} {after_timeline[item]}")
if removed_t:
print("\tRemoved timeline rows:")
for item in sorted(removed_t):
print(
f"\t\t{item} "
f"{before_state['timeline'][item]}")
if modified_t:
print(f"\tModified timeline rows: (old, new)")
for item in sorted(modified_t):
print(f"\t\t{item}")
print(f"\t\t\t{modified_t[item][1]} ==> {modified_t[item][0]}")
@app.cli.command()
@click.option(
'--correct_overlaps', '-c', default=False, is_flag=True,
help="Correct overlaps moving previous expired prior to subsequent start")
@click.option(
'--reprocess_qnrs', '-r', default=False, is_flag=True,
help="When correcting, also reprocess all QNRs for affected patients")
def find_overlaps(correct_overlaps, reprocess_qnrs):
from portal.models.qb_timeline import check_for_overlaps
from portal.models.user import patients_query
admin = User.query.filter(User.email == '__system__').one()
query = patients_query(
acting_user=admin,
include_test_role=False,
include_deleted=False)
for patient in query:
qbt_rows = QBT.query.filter(
QBT.user_id == patient.id).filter(
QBT.research_study_id == BASE_RS_ID).order_by(
QBT.at, QBT.id).all()
# Check for overlaps prints out any found with given flag
if check_for_overlaps(
qbt_rows, cli_presentation=True) and correct_overlaps:
# Reprocess w/ adjusting expired, report differences
b4 = capture_patient_state(patient.id)
if reprocess_qnrs:
# Extends runtime and makes for noisy audit logs.
# Furthermore in practice no QNRs require updates as
# expiration isn't moved if QNRs were posted in the overlap.
QuestionnaireResponse.purge_qb_relationship(
subject_id=patient.id,
research_study_id=0,
acting_user_id=admin.id,
)
update_users_QBT(
patient.id, research_study_id=0, invalidate_existing=True)
present_before_after_state(
patient.id, patient.external_study_id, b4)
@click.option('--org_id', help="Organization (site) ID", required=True)
@click.option(
'--retired',
required=True,
help="datetime for site's current protocol expiration,"
" format example: 2019-04-09 15:14:43")
@app.cli.command()
def preview_site_update(org_id, retired):
"""Preview Timeline changes for affected users
As research protocol changes can affect patients' timeline (for example
if the new protocol overlaps with visits, i.e. quarterly time points,
and user's submission prevents inclusion of new overlapped visits),
capture the organization's patients' timeline state before and after the
protocol change, and generate a diff report.
"""
from portal.models.organization import (
Organization,
OrganizationResearchProtocol,
)
from portal.models.research_protocol import ResearchProtocol
from portal.models.user import patients_query
if app.config['SYSTEM_TYPE'].lower() == 'production':
raise RuntimeError("Not to be run on prod: changes user records")
organization = Organization.query.get(org_id)
admin = User.query.filter(User.email == '__system__').one()
query = patients_query(
acting_user=admin,
include_test_role=False,
include_deleted=False,
requested_orgs=[org_id])
# Capture state for all potentially affected patients
patient_state = {}
for patient in query:
patient_state[patient.id] = capture_patient_state(patient.id)
# Update the org's research protocol as requested - assume to the latest
previous_rp = organization.research_protocols[-1]
assert previous_rp.name == 'IRONMAN v3'
latest_rp = ResearchProtocol.query.filter(
ResearchProtocol.name == 'IRONMAN v5').one()
previous_org_rp = OrganizationResearchProtocol.query.filter(
OrganizationResearchProtocol.research_protocol_id ==
previous_rp.id).filter(
OrganizationResearchProtocol.organization_id == org_id).one()
previous_org_rp.retired_as_of = retired
new_org_rp = OrganizationResearchProtocol(
research_protocol=latest_rp,
organization=organization)
db.session.add(new_org_rp)
db.session.commit()
print(f"Extending Research Protocols for {organization}")
print(f" - Adding RP {latest_rp.name}")
print(f" - {previous_rp.name} now retired as of {retired}")
print("-"*80)
# With new RP in place, cycle through patients, purge and
# refresh timeline and QNR data, and report any diffs
for patient in query:
QuestionnaireResponse.purge_qb_relationship(
subject_id=patient.id,
research_study_id=0,
acting_user_id=admin.id,
)
update_users_QBT(
patient.id, research_study_id=0, invalidate_existing=True)
present_before_after_state(
patient.id, patient.external_study_id, patient_state[patient.id])
# Restore organization to pre-test RPs
db.session.delete(new_org_rp)
db.session.commit()
|
uwcirg/true_nth_usa_portal
|
manage.py
|
Python
|
bsd-3-clause
| 26,824
|
[
"VisIt"
] |
335885586bbdde0d9e2e086282ede5938f0fe983ed364bf7dd5d246038aeaaed
|
import numpy as np
import copy
import numpy.linalg as la
import summary_output as SUMMARY
import robust as ROBUST
import user_output as USER
from utils import spdot, sphstack, RegressionPropsY, RegressionPropsVM
__author__ = "Luc Anselin luc.anselin@asu.edu, David C. Folch david.folch@asu.edu, Jing Yao jingyao@asu.edu"
__all__ = ["TSLS"]
class BaseTSLS(RegressionPropsY, RegressionPropsVM):
"""
Two stage least squares (2SLS) (note: no consistency checks,
diagnostics or constant added)
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x); cannot be
used in combination with h
h : array
Two dimensional array with n rows and one column for each
exogenous variable to use as instruments (note: this
can contain variables from x); cannot be used in
combination with q
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given. If 'hac', then a
HAC consistent estimator of the variance-covariance
matrix is given. Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
kstar : integer
Number of endogenous variables.
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
hth : float
H'H
hthi : float
(H'H)^-1
varb : array
(Z'H (H'H)^-1 H'Z)^-1
zthhthi : array
Z'H(H'H)^-1
pfora1a2 : array
n(zthhthi)'varb
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> reg = BaseTSLS(y, X, yd, q=q)
>>> print reg.betas
[[ 88.46579584]
[ 0.5200379 ]
[ -1.58216593]]
>>> reg = BaseTSLS(y, X, yd, q=q, robust="white")
"""
def __init__(self, y, x, yend, q=None, h=None,\
robust=None, gwk=None, sig2n_k=False):
if issubclass(type(q), np.ndarray) and issubclass(type(h), np.ndarray):
raise Exception, "Please do not provide 'q' and 'h' together"
if q is None and h is None:
raise Exception, "Please provide either 'q' or 'h'"
self.y = y
self.n = y.shape[0]
self.x = x
self.kstar = yend.shape[1]
z = sphstack(self.x,yend) # including exogenous and endogenous variables
if type(h).__name__ not in ['ndarray', 'csr_matrix']:
h = sphstack(self.x,q) # including exogenous variables and instrument
self.z = z
self.h = h
self.q = q
self.yend = yend
self.k = z.shape[1] # k = number of exogenous variables and endogenous variables
hth = spdot(h.T,h)
hthi = la.inv(hth)
zth = spdot(z.T,h)
hty = spdot(h.T,y)
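# Editorial comment: with Z = [x, yend] and instruments H, the 2SLS
# estimator computed stepwise below is
# betas = [Z'H (H'H)^-1 H'Z]^-1 Z'H (H'H)^-1 H'y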
factor_1 = np.dot(zth,hthi)
factor_2 = np.dot(factor_1,zth.T)
varb = la.inv(factor_2) # this one needs to be in cache to be used in AK
factor_3 = np.dot(varb,factor_1)
betas = np.dot(factor_3,hty)
self.betas = betas
self.varb = varb
self.zthhthi = factor_1
# predicted values
self.predy = spdot(z,betas)
# residuals
u = y - self.predy
self.u = u
# attributes used in property
self.hth = hth # Required for condition index
self.hthi = hthi # Used in error models
if robust:
self.vm = ROBUST.robust_vm(reg=self, gwk=gwk)
self._cache = {}
if sig2n_k:
self.sig2 = self.sig2n_k
else:
self.sig2 = self.sig2n
@property
def pfora1a2(self):
if 'pfora1a2' not in self._cache:
self._cache['pfora1a2'] = self.n*np.dot(self.zthhthi.T, self.varb)
return self._cache['pfora1a2']
@property
def vm(self):
if 'vm' not in self._cache:
self._cache['vm'] = np.dot(self.sig2, self.varb)
return self._cache['vm']
class TSLS(BaseTSLS):
"""
Two stage least squares with results and diagnostics.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (required if running spatial
diagnostics)
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given. If 'hac', then a
HAC consistent estimator of the variance-covariance
matrix is given. Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
spat_diag : boolean
If True, then compute Anselin-Kelejian test (requires w)
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
kstar : integer
Number of endogenous variables.
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
robust : string
Adjustment for robust standard errors
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
ak_test : tuple
Anselin-Kelejian test; tuple contains the pair (statistic,
p-value)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
hth : float
H'H
hthi : float
(H'H)^-1
varb : array
(Z'H (H'H)^-1 H'Z)^-1
zthhthi : array
Z'H(H'H)^-1
pfora1a2 : array
n(zthhthi)'varb
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Extract the CRIME column (crime rates) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract the INC (income) vector from the DBF to be used as
the independent variable in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case we treat HOVAL (home value) as an endogenous regressor.
We tell the model this by passing it in a different parameter
from the exogenous variables (x).
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for HOVAL. We use DISCBD (distance to the
CBD) for this and hence put it in the instruments parameter, 'q'.
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
With the preliminaries in place, we are ready to run the model. In this
case, we will need the variables (exogenous and endogenous) and the
instruments. If we want the names of the variables printed in the
output summary, we have to pass them in as well, although this is optional.
>>> reg = TSLS(y, X, yd, q, name_x=['inc'], name_y='crime', name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')
>>> print reg.betas
[[ 88.46579584]
[ 0.5200379 ]
[ -1.58216593]]
"""
def __init__(self, y, x, yend, q,\
w=None,\
robust=None, gwk=None, sig2n_k=False,\
spat_diag=False,\
vm=False, name_y=None, name_x=None,\
name_yend=None, name_q=None,\
name_w=None, name_gwk=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y)
USER.check_robust(robust, gwk)
USER.check_spat_diag(spat_diag, w)
x_constant = USER.check_constant(x)
BaseTSLS.__init__(self, y=y, x=x_constant, yend=yend, q=q,\
robust=robust, gwk=gwk, sig2n_k=sig2n_k)
self.title = "TWO STAGE LEAST SQUARES"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_z = self.name_x + self.name_yend
self.name_q = USER.set_name_q(name_q, q)
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.robust = USER.set_robust(robust)
self.name_w = USER.set_name_w(name_w, w)
self.name_gwk = USER.set_name_w(name_gwk, gwk)
SUMMARY.TSLS(reg=self, vm=vm, w=w, spat_diag=spat_diag)
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
y_var = 'CRIME'
y = np.array([db.by_col(y_var)]).reshape(49,1)
x_var = ['INC']
x = np.array([db.by_col(name) for name in x_var]).T
yd_var = ['HOVAL']
yd = np.array([db.by_col(name) for name in yd_var]).T
q_var = ['DISCBD']
q = np.array([db.by_col(name) for name in q_var]).T
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
tsls = TSLS(y, x, yd, q, w=w, spat_diag=True, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_ds='columbus', name_w='columbus.gal')
print tsls.summary
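# Hedged variant (added): the same Columbus model with White-consistent
# standard errors; 'robust' is a documented TSLS parameter, and the name
# tsls_white is hypothetical.
tsls_white = TSLS(y, x, yd, q, robust='white', name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_ds='columbus')
print tsls_white.summary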
|
AlanZatarain/pysal
|
pysal/spreg/twosls.py
|
Python
|
bsd-3-clause
| 18,699
|
[
"COLUMBUS"
] |
62f2b2fc9582d09b878d7c243c42e798336ff5bd549e3c38159aae268e7b4c2d
|
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import wx
import vtk
import vtkdevide
class modifyHomotopySlow(NoConfigModuleMixin, ModuleBase):
"""
WARNING, WARNING, DANGER WILL ROBINSON: this filter exists purely
for experimental purposes. If you really want to use
modifyHomotopy, use the module in modules.Filters (also part of
'Morphology'). This filter implements the modification according
to very basic math and is dog-slow. In addition, it's throw-away
code.
Modifies homotopy of input image I so that the only minima will
be at the user-specified seed-points or marker image, all other
minima will be suppressed and ridge lines separating minima will
be preserved.
Either the seed-points or the marker image (or both) can be used.
The marker image has to be >1 at the minima that are to be enforced
and 0 otherwise.
This module is often used as a pre-processing step to ensure that
the watershed doesn't over-segment.
$Revision: 1.1 $
"""
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
NoConfigModuleMixin.__init__(self)
# these will be our markers
self._inputPoints = None
# we can't connect the image input directly to the masksource,
# so we have to keep track of it separately.
self._inputImage = None
self._inputImageObserverID = None
# we need to modify the mask (I) as well. The problem with a
# ProgrammableFilter is that you can't request GetOutput() before
# the input has been set...
self._maskSource = vtk.vtkProgrammableSource()
self._maskSource.SetExecuteMethod(self._maskSourceExecute)
# we'll use this to synthesise a volume according to the seed points
self._markerSource = vtk.vtkProgrammableSource()
self._markerSource.SetExecuteMethod(self._markerSourceExecute)
# second input is J (the marker)
# we'll use this to change the markerImage into something we can use
self._imageThreshold = vtk.vtkImageThreshold()
# everything equal to or above 1.0 will be "on"
self._imageThreshold.ThresholdByUpper(1.0)
self._imageThresholdObserverID = self._imageThreshold.AddObserver(
'EndEvent', self._observerImageThreshold)
self._viewFrame = self._createViewFrame(
{'Module (self)' : self})
# we're not going to give imageErode any input... that's going
# to happen manually in the execute_module function :)
self._imageErode = vtk.vtkImageContinuousErode3D()
self._imageErode.SetKernelSize(3,3,3)
module_utils.setup_vtk_object_progress(self, self._imageErode,
'Performing greyscale 3D erosion')
self._sup = vtk.vtkImageMathematics()
self._sup.SetOperationToMax()
self._sup.SetInput1(self._imageErode.GetOutput())
self._sup.SetInput2(self._maskSource.GetStructuredPointsOutput())
# pass the data down to the underlying logic
self.config_to_logic()
# and all the way up from logic -> config -> view to make sure
self.syncViewWithLogic()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
NoConfigModuleMixin.close(self)
ModuleBase.close(self)
#
self._imageThreshold.RemoveObserver(self._imageThresholdObserverID)
# get rid of our reference
del self._markerSource
del self._maskSource
del self._imageThreshold
del self._sup
del self._imageErode
def get_input_descriptions(self):
return ('VTK Image Data', 'Minima points', 'Minima image')
def set_input(self, idx, inputStream):
if idx == 0:
if inputStream != self._inputImage:
# if we have a different image input, the seeds will have to
# be rebuilt!
self._markerSource.Modified()
# and obviously the masksource has to know that its "input"
# has changed
self._maskSource.Modified()
if inputStream:
# we have to add an observer
s = inputStream.GetSource()
if s:
self._inputImageObserverID = s.AddObserver(
'EndEvent', self._observerInputImage)
else:
# if we had an observer, remove it
if self._inputImage:
s = self._inputImage.GetSource()
if s and self._inputImageObserverID:
s.RemoveObserver(
self._inputImageObserverID)
self._inputImageObserverID = None
# finally store the new data
self._inputImage = inputStream
elif idx == 1:
if inputStream != self._inputPoints:
# check that the inputStream is either None (meaning
# disconnect) or a valid type
try:
if inputStream != None and \
inputStream.devideType != 'namedPoints':
raise TypeError
except (AttributeError, TypeError):
raise TypeError, 'This input requires a points-type'
if self._inputPoints:
self._inputPoints.removeObserver(
self._observerInputPoints)
self._inputPoints = inputStream
if self._inputPoints:
self._inputPoints.addObserver(self._observerInputPoints)
# the input points situation has changed, make sure
# the marker source knows this...
self._markerSource.Modified()
# as well as the mask source of course
self._maskSource.Modified()
else:
if inputStream != self._imageThreshold.GetInput():
self._imageThreshold.SetInput(inputStream)
# we have a different inputMarkerImage... have to recalc
self._markerSource.Modified()
self._maskSource.Modified()
def get_output_descriptions(self):
return ('Modified VTK Image Data', 'I input', 'J input')
def get_output(self, idx):
if idx == 0:
return self._sup.GetOutput()
elif idx == 1:
return self._maskSource.GetStructuredPointsOutput()
else:
return self._markerSource.GetStructuredPointsOutput()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
def execute_module(self):
# FIXME: if this module ever becomes anything other than an experiment, build
# this logic into yet another ProgrammableSource
# make sure marker is up to date
self._markerSource.GetStructuredPointsOutput().Update()
self._maskSource.GetStructuredPointsOutput().Update()
tempJ = vtk.vtkStructuredPoints()
tempJ.DeepCopy(self._markerSource.GetStructuredPointsOutput())
self._imageErode.SetInput(tempJ)
self._diff = vtk.vtkImageMathematics()
self._diff.SetOperationToSubtract()
self._accum = vtk.vtkImageAccumulate()
self._accum.SetInput(self._diff.GetOutput())
# now begin our loop
stable = False
while not stable:
# do erosion, get supremum of erosion and mask I
self._sup.GetOutput().Update()
# compare this result with tempJ
self._diff.SetInput1(tempJ)
self._diff.SetInput2(self._sup.GetOutput())
self._accum.Update()
print "%f == %f ?" % (self._accum.GetMin()[0], self._accum.GetMax()[0])
if abs(self._accum.GetMin()[0] - self._accum.GetMax()[0]) < 0.0001:
stable = True
else:
# not stable yet...
print "Trying again..."
tempJ.DeepCopy(self._sup.GetOutput())
def _markerSourceExecute(self):
imageI = self._inputImage
if imageI:
imageI.Update()
# setup and allocate J output
outputJ = self._markerSource.GetStructuredPointsOutput()
# _dualGreyReconstruct wants inputs the same with regards to
# dimensions, origin and type, so this is okay.
outputJ.CopyStructure(imageI)
outputJ.AllocateScalars()
# we need this to build up J
minI, maxI = imageI.GetScalarRange()
mi = self._imageThreshold.GetInput()
if mi:
if mi.GetOrigin() == outputJ.GetOrigin() and \
mi.GetExtent() == outputJ.GetExtent():
self._imageThreshold.SetInValue(minI)
self._imageThreshold.SetOutValue(maxI)
self._imageThreshold.SetOutputScalarType(imageI.GetScalarType())
self._imageThreshold.GetOutput().SetUpdateExtentToWholeExtent()
self._imageThreshold.Update()
outputJ.DeepCopy(self._imageThreshold.GetOutput())
else:
vtk.vtkOutputWindow.GetInstance().DisplayErrorText(
'modifyHomotopy: marker input should be same dimensions as image input!')
# we can continue as if we only had seeds
scalars = outputJ.GetPointData().GetScalars()
scalars.FillComponent(0, maxI)
else:
# initialise all scalars to maxI
scalars = outputJ.GetPointData().GetScalars()
scalars.FillComponent(0, maxI)
# now go through all seed points and set those positions in
# the scalars to minI
if self._inputPoints:
for ip in self._inputPoints:
x,y,z = ip['discrete']
outputJ.SetScalarComponentFromDouble(x, y, z, 0, minI)
def _maskSourceExecute(self):
inputI = self._inputImage
if inputI:
inputI.Update()
self._markerSource.Update()
outputJ = self._markerSource.GetStructuredPointsOutput()
# we now have an outputJ
if not inputI.GetScalarPointer() or \
not outputJ.GetScalarPointer() or \
not inputI.GetDimensions() > (0,0,0):
vtk.vtkOutputWindow.GetInstance().DisplayErrorText(
'modifyHomotopy: Input is empty.')
return
iMath = vtk.vtkImageMathematics()
iMath.SetOperationToMin()
iMath.SetInput1(outputJ)
iMath.SetInput2(inputI)
iMath.GetOutput().SetUpdateExtentToWholeExtent()
iMath.Update()
outputI = self._maskSource.GetStructuredPointsOutput()
outputI.DeepCopy(iMath.GetOutput())
def _observerInputPoints(self, obj):
# this will be called if anything happens to the points
# simply make sure our markerSource knows that it's now invalid
self._markerSource.Modified()
self._maskSource.Modified()
def _observerInputImage(self, obj, eventName):
# the inputImage has changed, so the marker will have to change too
self._markerSource.Modified()
# logical, input image has changed
self._maskSource.Modified()
def _observerImageThreshold(self, obj, eventName):
# if anything in the threshold has changed, (e.g. the input) we
# have to invalidate everything else after it
self._markerSource.Modified()
self._maskSource.Modified()
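# --- Hedged example (added): the loop in execute_module above is greyscale
# reconstruction by erosion, J <- max(erode(J), I), iterated until stable.
# A minimal numpy/scipy sketch of the same idea; the function name is
# hypothetical and scipy.ndimage is assumed to be available.
def _reconstruction_by_erosion_demo(marker, mask, max_iterations=100):
    import numpy as np
    from scipy import ndimage
    j = np.asarray(marker, dtype=float).copy()
    for _ in range(max_iterations):
        # supremum of the eroded marker and the mask image
        nxt = np.maximum(ndimage.grey_erosion(j, size=(3, 3, 3)), mask)
        if np.array_equal(nxt, j):  # stable: the homotopy has been modified
            break
        j = nxt
    return j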
|
chrisidefix/devide
|
modules/user/experimental/modifyHomotopySlow.py
|
Python
|
bsd-3-clause
| 12,547
|
[
"VTK"
] |
ff7da02eb472f6d72cb11da6b48f145352d27f82894a510f726eecc0913d45df
|
''' http://www.biopython.org/DIST/docs/api/Bio.KDTree.KDTree%27-module.html '''
def photoz(indices):  # renamed from 'list' to avoid shadowing the builtin
import sys, pyfits, os
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
r = []
for i in indices[0]:
r.append(table.field('zp_best')[i])
print r
import pylab, scipy
a = scipy.array(r)
a, b, varp = pylab.hist(a,bins=scipy.arange(0,4,0.05))
pylab.xlabel("Z")
pylab.ylabel("Number of Galaxies")
pylab.show()
raw_input()
return
def tree(start,end):
import sys, pyfits, os
#caltable = '/tmp/' + cluster + 'output.cat' #sys.argv[1]
#print cluster, caltable
#hdulist = pyfits.open(caltable)
#table = hdulist["OBJECTS"].data
from scipy.spatial import KDTree
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
#file = os.environ['subdir'] + '/MACS1423+24/PHOTOMETRY/MACS1423+24.slr.cat'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
array = []
cols = []
lim_mags = {}
#for filter in ['MAG_APER1-MEGAPRIME-0-1-u']: # ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
for filter in ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
print hdulist['OBJECTS'].columns
for column in hdulist['OBJECTS'].columns:
if filter == column.name:
print column.format
cols.append(pyfits.Column(name=filter,format=column.format,array=hdulist['OBJECTS'].data.field(filter)[start:end]))
#import pylab, scipy
l = hdulist['OBJECTS'].data.field(filter)[start:end]
#a,b,varp = pylab.hist(l,bins=scipy.arange(20,30,0.1))
#print a, b
#c = zip(a,b)
#c.sort()
#lim_mags[filter] = c[-1][1]
#pylab.xlabel('Mag')
#pylab.ylabel('Number of Galaxies')
#pylab.show()
print cols
tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
from copy import copy
tbhdu_good=copy(tbhdu)
#mask = reduce(lambda x,y:x*y,[tbhdu.data.field(filter) < (lim_mags[filter]-1.) for filter in lim_mags.keys()])
#print len(tbhdu_good.data.field('umag')[mask])
# NB: this loop re-appends the same columns; tbhdu above was already built from them
for filter in ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
print hdulist['OBJECTS'].columns
for column in hdulist['OBJECTS'].columns:
if filter == column.name:
print column.format
cols.append(pyfits.Column(name=filter,format=column.format,array=hdulist['OBJECTS'].data.field(filter)[start:end]))
#import pylab, scipy
#l = hdulist['OBJECTS'].data.field(filter)[mask][0:length]
#pylab.clf()
#a,b,varp = pylab.hist(l,bins=scipy.arange(20,30,0.1))
#print a, b
#c = zip(a,b)
#c.sort()
#lim_mags[filter] = c[-1][1]
#pylab.xlabel('Mag')
#pylab.ylabel('Number of Galaxies')
#pylab.show()
tbhdu_bad=copy(tbhdu)
import scipy
p = scipy.array([[tbhdu.data[2200][i] for i in range(7)]])
print p
#return KDTree(p)
hdu = pyfits.PrimaryHDU()
thdulist = pyfits.HDUList([hdu,tbhdu])
#os.system('rm temp.fits')
#thdulist.writeto('temp.fits')
import numpy
sarray = (tbhdu.data.tolist())
print numpy.shape(sarray)
#a = KDTree(sarray)
print lim_mags
return sarray
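# --- Hedged example (added): a small sketch of what the commented-out KDTree
# lines above were building toward -- nearest neighbours in the 7-dimensional
# magnitude space. The function name is hypothetical; 'sarray' is the list of
# magnitude tuples returned by tree().
def query_neighbours(sarray, k=10):
    import numpy
    from scipy.spatial import KDTree
    data = numpy.asarray(sarray)
    kdtree = KDTree(data)
    # distances to, and row indices of, the k nearest neighbours of object 0
    dist, idx = kdtree.query(data[0], k=k)
    return dist, idx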
|
deapplegate/wtgpipeline
|
non_essentials/kdtree/kdtree.py
|
Python
|
mit
| 3,807
|
[
"Biopython"
] |
c36cea80fcc470476381367f34488d3aea0108e45eb29799377e16468ead4bf6
|
from ase import *
from hotbit import *
from numpy import *
from box.systems import nanotube
atoms = nanotube(9,0)
angle = atoms.container.get('angle')
traj = PickleTrajectory('twisting.traj','w',atoms)
# twist without scaling
for twist in linspace(0,pi/10,100):
atoms.set_container(angle=angle+twist)
traj.write()
# twist with scaling
atoms.set_container(angle=angle)
for twist in linspace(0,pi/10,100):
atoms.set_container(angle=angle+twist,scale_atoms=True)
traj.write()
# twist with scaling + view copies
cp = atoms.extended_copy((1,1,10))
traj = PickleTrajectory('twisting_extended.traj','w',cp)
atoms.set_container(angle=angle,scale_atoms=True)
for twist in linspace(0,pi/10,100):
atoms.set_container(angle=angle+twist,scale_atoms=True)
cp.set_positions( atoms.extended_copy((1,1,10)).get_positions() )
traj.write()
|
pekkosk/hotbit
|
examples/twist_nanotube.py
|
Python
|
gpl-2.0
| 878
|
[
"ASE"
] |
94a2e5d92d242b8dea1d239ec3b32e3ef8a306b37988542b9b4888c8cf731297
|
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains functions that load and save the game state.
import pickle
import cPickle
import StringIO
import cStringIO
import zipfile
import os
import re
import threading
import sys
import platform
import types
import renpy.display
# This is used to cache information about saved games.
cache = { }
# Dump that chooses which pickle to use:
def dump(o, f):
if renpy.config.use_cpickle:
cPickle.dump(o, f, cPickle.HIGHEST_PROTOCOL)
else:
pickle.dump(o, f, pickle.HIGHEST_PROTOCOL)
def loads(s):
if renpy.config.use_cpickle:
return cPickle.loads(s)
else:
return pickle.loads(s)
# This is used as a quick and dirty way of versioning savegame
# files.
savegame_suffix = renpy.savegame_suffix
def save_dump(roots, log):
"""
Dumps information about the save to save_dump.txt. We dump the size
of the object (including unique children), the path to the object,
and the type or repr of the object.
"""
o_repr_cache = { }
def visit(o, path):
ido = id(o)
if ido in o_repr_cache:
f.write("{0: 7d} {1} = alias {2}\n".format(0, path, o_repr_cache[ido]))
return 0
if isinstance(o, (int, float, types.NoneType, types.ModuleType, types.ClassType)):
o_repr = repr(o)
elif isinstance(o, (str, unicode)):
if len(o) <= 80:
o_repr = repr(o).encode("utf-8")
else:
o_repr = repr(o[:80] + "...").encode("utf-8")
elif isinstance(o, (tuple, list)):
o_repr = "<" + o.__class__.__name__ + ">"
elif isinstance(o, dict):
o_repr = "<" + o.__class__.__name__ + ">"
elif isinstance(o, types.MethodType):
o_repr = "<method {0}.{1}>".format(o.im_class.__name__, o.im_func.__name__)
elif isinstance(o, object):
o_repr = "<{0}>".format(type(o).__name__)
else:
o_repr = "BAD TYPE <{0}>".format(type(o).__name__)
o_repr_cache[ido] = o_repr
if isinstance(o, (int, float, types.NoneType, types.ModuleType, types.ClassType)):
size = 1
elif isinstance(o, (str, unicode)):
size = len(o) / 40 + 1
elif isinstance(o, (tuple, list)):
size = 1
for i, oo in enumerate(o):
size += 1
size += visit(oo, "{0}[{1!r}]".format(path, i))
elif isinstance(o, dict):
size = 2
for k, v in o.iteritems():
size += 2
size += visit(v, "{0}[{1!r}]".format(path, k))
elif isinstance(o, types.MethodType):
size = 1 + visit(o.im_self, path + ".im_self")
else:
try:
reduction = o.__reduce_ex__(2)
except:
reduction = [ ]
o_repr = "BAD REDUCTION " + o_repr
# Gets an element from the reduction, or o if we don't have
# such an element.
def get(idx, default):
if idx < len(reduction) and reduction[idx] is not None:
return reduction[idx]
else:
return default
# An estimate of the size of the object, in arbitrary units. (These units are about 20-25 bytes on
# my computer.)
size = 1
state = get(2, { })
if isinstance(state, dict):
for k, v in state.iteritems():
size += 2
size += visit(v, path + "." + k)
else:
size += visit(state, path + ".__getstate__()")
for i, oo in enumerate(get(3, [])):
size += 1
size += visit(oo, "{0}[{1}]".format(path, i))
for k, v in get(4, []):
size += 2
size += visit(v, "{0}[{1!r}]".format(path, k))
f.write("{0: 7d} {1} = {2}\n".format(size, path, o_repr_cache[ido]))
return size
f = file("save_dump.txt", "w")
visit(roots, "roots")
visit(log, "log")
f.close()
# A file that can only be written to while the cpu is idle.
class IdleFile(file):
def write(self, s):
renpy.display.core.cpu_idle.wait()
return file.write(self, s)
# A similar StringIO.
class IdleStringIO(StringIO.StringIO):
def write(self, s):
renpy.display.core.cpu_idle.wait()
return StringIO.StringIO.write(self, s)
# Used to indicate an aborted save, due to the game being mutated
# while the save is in progress.
class SaveAbort(Exception):
pass
def save(filename, extra_info='',
file=file, StringIO=cStringIO.StringIO, #@ReservedAssignment
mutate_flag=False, wait=None):
"""
Saves the game in the given filename. This will save the game
along with a screenshot and the given extra_info, which is just
serialized.
It's expected that a screenshot will be taken (with
renpy.take_screenshot) before this is called.
"""
cache.pop(filename, None)
filename = filename + savegame_suffix
try:
os.unlink(renpy.config.savedir + "/" + filename)
except:
pass
if mutate_flag:
renpy.python.mutate_flag = False
roots = renpy.game.log.freeze(wait)
logf = StringIO()
dump((roots, renpy.game.log), logf)
if mutate_flag and renpy.python.mutate_flag:
raise SaveAbort()
if renpy.config.save_dump:
save_dump(roots, renpy.game.log)
rf = file(renpy.config.savedir + "/" + filename, "wb")
zf = zipfile.ZipFile(rf, "w", zipfile.ZIP_DEFLATED)
# Screenshot.
zf.writestr("screenshot.png", renpy.game.interface.get_screenshot())
# Extra info.
zf.writestr("extra_info", extra_info.encode("utf-8"))
# Version.
zf.writestr("renpy_version", renpy.version)
# The actual game.
zf.writestr("log", logf.getvalue())
zf.close()
rf.close()
def scan_saved_game(name):
if name in cache:
return cache[name]
try:
f = name + savegame_suffix
zf = zipfile.ZipFile(renpy.config.savedir + "/" + f, "r")
try:
png = False
zf.getinfo('screenshot.tga')
except:
png = True
zf.getinfo('screenshot.png')
extra_info = zf.read("extra_info").decode("utf-8")
zf.close()
mtime = os.path.getmtime(renpy.config.savedir + "/" + f)
if png:
screenshot = renpy.display.im.ZipFileImage(renpy.config.savedir + '/' + f, "screenshot.png", mtime)
else:
screenshot = renpy.display.im.ZipFileImage(renpy.config.savedir + '/' + f, "screenshot.tga", mtime)
rv = extra_info, screenshot, mtime
except:
rv = None
cache[name] = rv
return rv
def list_saved_games(regexp=r'.'):
"""
This scans the savegames that we know about and returns
information about them. It returns a list of tuples, where each
tuple represents one savegame and consists of:
- The filename of the save.
- The extra_info that was passed to renpy.save.
- A displayable, the screenshot used to show the game.
- The time the game was saved at, seconds since 1/1/1970 UTC.
The regexp matches at the start of the filename, and filters the list.
"""
try:
files = os.listdir(renpy.config.savedir)
except:
return [ ]
files.sort()
files = [ i[:-len(savegame_suffix)]
for i in files
if i.endswith(savegame_suffix) and re.match(regexp, i) ]
rv = [ ]
for f in files:
info = scan_saved_game(f)
if info is not None:
extra_info, screenshot, mtime = info
rv.append((f, extra_info, screenshot, mtime))
return rv
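# Hedged example (added): a convenience wrapper showing how the regexp
# argument filters the list; it keeps only the slots written by the autosave
# machinery below. The helper name is hypothetical.
def list_autosaves():
    return list_saved_games(regexp=r'auto-')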
def can_load(filename):
"""
Returns true if we can load the given savegame file, False otherwise.
"""
try:
zf = zipfile.ZipFile(renpy.config.savedir + "/" + filename + savegame_suffix, "r")
zf.close()
return True
except:
return False
def load(filename):
"""
Loads the game from the given file. This function never returns.
"""
zf = zipfile.ZipFile(renpy.config.savedir + "/" + filename + savegame_suffix, "r")
roots, log = loads(zf.read("log"))
zf.close()
log.unfreeze(roots, label="_after_load")
def rename_save(old, new):
unlink_save(new)
os.rename(renpy.config.savedir + "/" + old + savegame_suffix,
renpy.config.savedir + "/" + new + savegame_suffix)
cache.pop(old, None)
cache.pop(new, None)
def unlink_save(filename):
if os.path.exists(renpy.config.savedir + "/" + filename + savegame_suffix):
os.unlink(renpy.config.savedir + "/" + filename + savegame_suffix)
cache.pop(filename, None)
def cycle_saves(name, count):
for count in range(1, count + 1):
if not os.path.exists(renpy.config.savedir + "/" + name + str(count) + savegame_suffix):
break
for i in range(count - 1, 0, -1):
rename_save(name + str(i), name + str(i + 1))
# Flag that lets us know if an autosave is in progress.
autosave_not_running = threading.Event()
autosave_not_running.set()
# The number of times autosave has been called without a save occurring.
autosave_counter = 0
def autosave_thread(take_screenshot):
global autosave_counter
try:
try:
renpy.display.core.cpu_idle.wait()
cycle_saves("auto-", renpy.config.autosave_slots)
renpy.display.core.cpu_idle.wait()
if renpy.config.auto_save_extra_info:
extra_info = renpy.config.auto_save_extra_info()
else:
extra_info = ""
if take_screenshot:
renpy.exports.take_screenshot(background=True)
save("auto-1", file=IdleFile, StringIO=IdleStringIO, mutate_flag=True, wait=renpy.display.core.cpu_idle.wait, extra_info=extra_info)
autosave_counter = 0
except:
pass
finally:
autosave_not_running.set()
def autosave():
global autosave_counter
if not renpy.config.autosave_frequency:
return
# That is, autosave is running.
if not autosave_not_running.isSet():
return
if renpy.config.skipping:
return
if len(renpy.game.contexts) > 1:
return
autosave_counter += 1
if autosave_counter < renpy.config.autosave_frequency:
return
force_autosave(True)
# This assumes a screenshot has already been taken.
def force_autosave(take_screenshot=False):
# That is, autosave is running.
if not autosave_not_running.isSet():
return
autosave_not_running.clear()
threading.Thread(target=autosave_thread, args=(take_screenshot,)).start()
class _MultiPersistent(object):
def __getstate__(self):
state = self.__dict__.copy()
del state['_filename']
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __getattr__(self, name):
if name.startswith("__") and name.endswith("__"):
raise AttributeError()
return None
def save(self):
fn = self._filename
f = file(fn + ".new", "wb")
dump(self, f)
f.close()
try:
os.rename(fn + ".new", fn)
except:
os.unlink(fn)
os.rename(fn + ".new", fn)
def MultiPersistent(name):
if not renpy.game.context().init_phase:
raise Exception("MultiPersistent objects must be created during the init phase.")
if sys.platform == 'win32':
files = [ os.path.expanduser("~/RenPy/Persistent") ]
if 'APPDATA' in os.environ:
files.append(os.environ['APPDATA'] + "/RenPy/persistent")
elif platform.mac_ver()[0]:
files = [ os.path.expanduser("~/.renpy/persistent"),
os.path.expanduser("~/Library/RenPy/persistent") ]
else:
files = [ os.path.expanduser("~/.renpy/persistent") ]
# Make the new persistent directory, why not?
try:
os.makedirs(files[-1])
except:
pass
fn = "" # prevent a warning from happening.
# Find the first file that actually exists. Otherwise, use the last
# file.
for fn in files:
fn = fn + "/" + name
if os.path.exists(fn):
break
try:
rv = loads(file(fn).read())
except:
rv = _MultiPersistent()
rv._filename = fn # W0201
return rv
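# --- Hedged usage sketch (added, as comments so the module's behavior is
# unchanged): MultiPersistent objects must be created during the init phase
# and saved explicitly; the name and attribute below are hypothetical.
#
#     mp = MultiPersistent("demo.example.com")
#     mp.seen_ending = True
#     mp.save()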
|
MSEMJEJME/tkot
|
renpy/loadsave.py
|
Python
|
gpl-2.0
| 14,620
|
[
"VisIt"
] |
4d685cd7428cb703b5fa42077dd49bc43f87034c2929f820d5d56e7820dfd2b4
|
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Frameworks/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
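# Hedged example (added): fullsplit inverts os.path.join, e.g. on a POSIX
# system fullsplit('lintory/templates/index.html') returns
# ['lintory', 'templates', 'index.html'].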
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for code_dir in [ 'lintory' ]:
for dirpath, dirnames, filenames in os.walk(code_dir):
# Ignore dirnames that start with '.' (rebuild the list in place;
# deleting entries while enumerating would skip elements)
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
# data_files += [ ( "/etc/lintory", [ "conf/settings.py" ] ) ]
scripts = [
'bin/lintory',
]
setup(
name = "lintory",
version = "0.1",
author = 'Brian May',
author_email = 'brian@microcomaustralia.com.au',
description = 'Lintory is a Django-based inventory system for tracking computer hardware.',
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
scripts = scripts,
)
|
VPAC/lintory
|
setup.py
|
Python
|
gpl-3.0
| 3,062
|
[
"Brian"
] |
33102aef787cf6213d10d61208574b63d01e5ad786b5bbe344e79b714e2a392d
|
#!/usr/bin/env python
from ase.lattice.surface import fcc111
import ase
from kmos.utils import get_ase_constructor
from kmos.types import *
import numpy as np
slab = fcc111('Pt', [1,1,4], vacuum=10)
positions = slab.get_scaled_positions()
pt = Project()
pt.set_meta(model_name='pt111',
model_dimension='2',
author='Max J. Hoffmann',
email='mjhoffmann@gmail.com',
debug=0)
layer = Layer(name='pt111')
pos1 = np.array([positions[1, 0],
positions[1, 1], 0.672])
layer.add_site(Site(name='hollow1',
pos=pos1))
pos2 = np.array([positions[2, 0],
positions[2, 1], 0.672])
#slab += ase.atoms.Atoms('H', cell=slab.cell, scaled_positions=[pos1])
#slab += ase.atoms.Atoms('H', cell=slab.cell, scaled_positions=[pos2])
#ase.visualize.view(slab, repeat=(1,1,1))
rpos = np.linalg.solve(slab.cell, np.array(pos2))
layer.add_site(Site(name='hollow2',
pos=pos2))
pt.add_layer(layer)
pt.lattice.representation = '[%s]' % get_ase_constructor(slab)
# Add species
pt.add_species(name='empty', color='#ffffff')
pt.add_species(name='H', representation="Atoms('H')", color='#ffff00')
#Add Processes
pt.parse_and_add_process('H_adsorption_hollow1; ->H@hollow1; 100000')
pt.parse_and_add_process('H_adsorption_hollow2; ->H@hollow2; 100000')
pt.parse_and_add_process('H_desorption_hollow1; H@hollow1->; 100000')
pt.parse_and_add_process('H_desorption_hollow2; H@hollow2->; 100000')
pt.parse_and_add_process('H_diff_h1h2; H@hollow1 -> H@hollow2; 1000000000')
pt.parse_and_add_process('H_diff_h2h1; H@hollow2 -> H@hollow1; 1000000000')
# Export, Save
xmlfile = file('Pt_111.xml', 'w')
xmlfile.write(str(pt))
xmlfile.close()
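# Hedged follow-up (added): assuming the kmos command line tools are
# installed, the XML written above can typically be compiled into a runnable
# model from the shell with something like
#     kmos export Pt_111.xml
# (check `kmos help export` for the exact interface of your kmos version).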
|
mhoffman/kmos
|
examples/model_Pt111_surface.py
|
Python
|
gpl-3.0
| 1,724
|
[
"ASE"
] |
9a05b30c6bdcdd5f43f70033674718ff093964897d7d867e6bc368302a2912ef
|
#!/usr/bin/env python
import tkinter as Tk
from tkinter import ttk, messagebox
import matplotlib
import numpy as np
import numpy.ma as ma
import new_cmaps
import sys, traceback
from new_cnorms import PowerNormWithNeg, PowerNormFunc
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as PathEffects
import matplotlib.transforms as mtransforms
class FieldsPanel:
# A dictionary of all of the parameters for this plot with the default parameters
example = """## WHATEVER IS TYPED HERE IS EVALUATED AS PURE PYTHON. THERE IS NO ERROR CHECKING
## OR ANY SANITIZATION OF USER INPUT. YOU WILL INHERIT THE NAMESPACE OF THE MAIN
## PROGRAM, BUT YOU CAN IMPORT OTHER LIBRARIES, DEFINE HELPER FUNCTIONS, WHATEVER.
## JUST BE SURE AT SOME POINT YOU DEFINE A FUNCTION NAMED FieldFunc THAT RETURNS
## SOMETHING THE SAME SHAPE AS YOUR FIELD ARRAYS. SIMULATION DATA CAN ONLY BE
## ACCESSED INSIDE OF FieldFunc.
#
## IT'S EASY TO DO BAD THINGS HERE... TYPE CAREFULLY :)
#
#def FieldFunc(bx, by, bz):
# # Be sure to include all the necessary data you need to calculate your
# # derived field quantity as arguments to the 'FieldFunc' function.
# # The only valid arguments to field function are things saved in the Tristan
# # HDF5 files: e.g., ui, bx, jz...etc. The arguments are the raw Tristan arrays.
#
# # You must return an array the same shape as the fields array, or an array that
# # is the same length as the x axis of the simulation (and then check the 1D-only option)
#
# return bx**2+by**2+bz**2
# """
plot_param_dict = {'twoD': 0,
'field_type': 0, # 0 = B-Field, 1 = E-Field, 2 = Currents, 3 = user-defined quantity
'cmdstr1': example,
'cmdstr2': example,
'cmdstr3': example,
'OneDOnly': [False, False, False],
'yaxis_label': ['$B$','$E$','$J$','$B$'],
'2D_label': [['$B_x$','$B_y$','$B_z$'],
['$E_x$','$E_y$','$E_z$'],
['$J_x$','$J_y$','$J_z$'],
['$B_\mathrm{tot}$','$B_\mathrm{tot}$','$B_\mathrm{tot}$']],
'1D_label': [['$B_x$','$B_y$','$B_z$'],
['$E_x$','$E_y$','$E_z$'],
['$J_x$','$J_y$','$J_z$'],
['$B_\mathrm{tot}$','$B_\mathrm{tot}$','$B_\mathrm{tot}$']],
'show_x' : 1,
'show_y' : 1,
'show_z' : 1,
'show_cbar': True,
'v_min': 0,
'v_max' : 10,
'set_v_min': False,
'set_v_max': False,
'show_shock' : False,
'show_FFT_region': False,
'OutlineText': True,
'spatial_x': True,
'spatial_y': False,
'normalize_fields': True, # Normalize fields to their upstream values
'cnorm_type': 'Linear', # Colormap norm; options are Log, Pow or Linear
'cpow_num': 1.0, # Used in the PowerNorm
'div_midpoint': 0.0, # The cpow color norm normalizes data to [0,1] using np.sign(x-midpoint)*np.abs(x-midpoint)**(-cpow_num) -> [0,midpoint,1] if it is a diverging cmap or [0,1] if it is not a diverging cmap
'interpolation': 'none',
'cmap': 'None', # If cmap is none, the plot will inherit the parent's cmap
'UseDivCmap': True, # Use a diverging cmap for the 2d plots
'stretch_colors': False, # If stretch colors is false, then for a diverging cmap the plot ensures -b and b are the same distance from the midpoint of the cmap.
'show_cpu_domains': False, # plots lines showing how the CPUs are divvying up the computational region
'face_color': 'gainsboro' }
gradient = np.linspace(0, 1, 256)  # A way to make the colorbar display better
gradient = np.vstack((gradient, gradient))
def __init__(self, parent, figwrapper):
self.settings_window = None
self.FigWrap = figwrapper
self.parent = parent
self.ChartTypes = self.FigWrap.PlotTypeDict.keys()
self.chartType = self.FigWrap.chartType
self.figure = self.FigWrap.figure
self.SetPlotParam('spatial_y', self.GetPlotParam('twoD'), update_plot = False)
self.InterpolationMethods = ['none','nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
def ChangePlotType(self, str_arg):
self.FigWrap.ChangeGraph(str_arg)
def norm(self, vmin=None, vmax=None):
if self.GetPlotParam('cnorm_type') =="Linear":
if self.GetPlotParam('UseDivCmap'):
return PowerNormWithNeg(1.0, vmin, vmax, midpoint = self.GetPlotParam('div_midpoint'), stretch_colors = self.GetPlotParam('stretch_colors'))
else:
return mcolors.Normalize(vmin, vmax)
elif self.GetPlotParam('cnorm_type') == "Log":
return mcolors.LogNorm(vmin, vmax)
else:
return PowerNormWithNeg(self.GetPlotParam('cpow_num'), vmin, vmax, div_cmap = self.GetPlotParam('UseDivCmap'),midpoint = self.GetPlotParam('div_midpoint'), stretch_colors = self.GetPlotParam('stretch_colors'))
def set_plot_keys(self):
'''A helper function that will ensure that each hdf5 file will only be
opened once per time step'''
# First make sure that omega_plasma & xi is loaded so we can fix the
# x & y distances.
# Then see if we are plotting E-field or B-Field
if self.GetPlotParam('field_type') == 0: # Load the B-Field
self.arrs_needed = ['c_omp', 'istep', 'bx']#, 'by', 'bz']
if self.GetPlotParam('show_y'):
self.arrs_needed.append('by')
if self.GetPlotParam('show_z'):
self.arrs_needed.append('bz')
if self.GetPlotParam('field_type') == 1: # Load the E-Field
self.arrs_needed = ['c_omp', 'istep', 'ex']
if self.GetPlotParam('show_y'):
self.arrs_needed.append('ey')
if self.GetPlotParam('show_z'):
self.arrs_needed.append('ez')
if self.GetPlotParam('field_type') == 2: # Load the currents
self.arrs_needed = ['c_omp', 'istep', 'jx']
if self.GetPlotParam('show_y'):
self.arrs_needed.append('jy')
if self.GetPlotParam('show_z'):
self.arrs_needed.append('jz')
if self.GetPlotParam('field_type') == 3: # Check what the user wants.
self.arrs_needed = ['c_omp', 'istep', 'bx']
if self.GetPlotParam('show_x'):
for line in self.GetPlotParam('cmdstr1').splitlines():
if line[1:15] == 'def FieldFunc(':
self.f1args = [elm.strip() for elm in line[15:-2].split(',')]
self.arrs_needed += self.f1args
if self.GetPlotParam('show_y'):
for line in self.GetPlotParam('cmdstr2').splitlines():
if line[1:15] == 'def FieldFunc(':
self.f2args = [elm.strip() for elm in line[15:-2].split(',')]
self.arrs_needed += self.f2args
if self.GetPlotParam('show_z'):
for line in self.GetPlotParam('cmdstr3').splitlines():
if line[1:15] == 'def FieldFunc(':
self.f3args = [elm.strip() for elm in line[15:-2].split(',')]
self.arrs_needed += self.f3args
return self.arrs_needed
def LoadData(self):
''' A Helper function that loads the data for the plot'''
# First see of the x_axis and y_axis values have already been calculated
# and stored in the DataDict for this time step
self.c_omp = self.FigWrap.LoadKey('c_omp')[0]
self.istep = self.FigWrap.LoadKey('istep')[0]
if self.GetPlotParam('cmap') == 'None':
if self.GetPlotParam('UseDivCmap'):
self.cmap = self.parent.MainParamDict['DivColorMap']
else:
self.cmap = self.parent.MainParamDict['ColorMap']
else:
self.cmap = self.GetPlotParam('cmap')
self.xcolor = new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']](0.2)
self.ycolor = new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']](0.5)
self.zcolor = new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']](0.8)
if np.isnan(self.parent.btheta):
# Maybe B_0 is 0????
self.SetPlotParam('normalize_fields', 0, update_plot = False)
# see if the axis values are saved in the data dict
if 'xaxis_values' in self.parent.DataDict.keys():
self.xaxis_values = self.parent.DataDict['xaxis_values']
else:
# x-values haven't been calculated yet, generate them then save them to the dictionary for later.
if self.GetPlotParam('field_type') ==0 or self.GetPlotParam('field_type') == 3:
self.xaxis_values = np.arange(self.FigWrap.LoadKey('bx').shape[2])/self.c_omp*self.istep
elif self.GetPlotParam('field_type') ==1:
self.xaxis_values = np.arange(self.FigWrap.LoadKey('ex').shape[2])/self.c_omp*self.istep
elif self.GetPlotParam('field_type') ==2:
self.xaxis_values = np.arange(self.FigWrap.LoadKey('jx').shape[2])/self.c_omp*self.istep
self.parent.DataDict['xaxis_values'] = np.copy(self.xaxis_values)
self.flagx = 0 # 0 means it didn't plot, 1 means it is 1D only, 2 means it returned a 3d object
self.flagy = 0
self.flagz = 0
if self.GetPlotParam('field_type') == 0: # Load the B-Field
if self.GetPlotParam('show_x'):
self.flagx = 2
if self.GetPlotParam('normalize_fields'):
self.fx = self.FigWrap.LoadKey('bx')*self.parent.b0**-1
else:
self.fx = self.FigWrap.LoadKey('bx')
if self.GetPlotParam('show_y'):
self.flagy = 2
if self.GetPlotParam('normalize_fields'):
self.fy = self.FigWrap.LoadKey('by')*self.parent.b0**-1
else:
self.fy = self.FigWrap.LoadKey('by')
if self.GetPlotParam('show_z'):
self.flagz = 2
if self.GetPlotParam('normalize_fields'):
self.fz =self.FigWrap.LoadKey('bz')*self.parent.b0**-1
else:
self.fz =self.FigWrap.LoadKey('bz')
if self.GetPlotParam('field_type') == 1: # Load the E-Field
if self.GetPlotParam('show_x'):
self.flagx = 2
if self.GetPlotParam('normalize_fields'):
self.fx = self.FigWrap.LoadKey('ex')*self.parent.e0**-1
else:
self.fx = self.FigWrap.LoadKey('ex')
if self.GetPlotParam('show_y'):
self.flagy = 2
if self.GetPlotParam('normalize_fields'):
self.fy = self.FigWrap.LoadKey('ey')*self.parent.e0**-1
else:
self.fy = self.FigWrap.LoadKey('ey')
if self.GetPlotParam('show_z'):
self.flagz = 2
if self.GetPlotParam('normalize_fields'):
self.fz =self.FigWrap.LoadKey('ez')*self.parent.e0**-1
else:
self.fz =self.FigWrap.LoadKey('ez')
elif self.GetPlotParam('field_type') == 2: # Load the currents
if self.GetPlotParam('show_x'):
self.fx = self.FigWrap.LoadKey('jx')
self.flagx = 2
if self.GetPlotParam('show_y'):
self.fy = self.FigWrap.LoadKey('jy')
self.flagy = 2
if self.GetPlotParam('show_z'):
self.fz = self.FigWrap.LoadKey('jz')
self.flagz = 2
elif self.GetPlotParam('field_type') == 3: # User Defined fields
if self.GetPlotParam('show_x'):
if not set(self.f1args).isdisjoint(self.parent.prtl_keys):
keyx = hash(self.GetPlotParam('cmdstr1')+str(self.parent.stride)+str(self.GetPlotParam('OneDOnly')[0]))
else:
keyx = hash(self.GetPlotParam('cmdstr1')+str(self.GetPlotParam('OneDOnly')[0]) )
if keyx in self.parent.DataDict.keys():
self.fx = self.parent.DataDict[keyx]
if self.GetPlotParam('OneDOnly')[0]:
self.flagx = 1
else:
self.flagx = 2
else:
try:
tmpcstr = ''
for line in self.GetPlotParam('cmdstr1').splitlines():
tmpcstr += line[1:] +'\n'
tmpcstr += 'self.fx = FieldFunc(*[self.FigWrap.LoadKey(k) for k in self.f1args])'
exec(compile(tmpcstr,'<string>', 'exec'), locals(), locals())#, '<string>', 'exec'), **{'self':self})
#print(FieldFunc)
self.parent.DataDict[keyx] = self.fx
if self.GetPlotParam('OneDOnly')[0]:
self.flagx = 1
else:
self.flagx = 2
except:
print(sys.exc_info())
"""
tb_lines = traceback.format_exc(sys.exc_info()[2]).splitlines()
tb_lines.pop(1)
tb_lines[1] = ''
err_msg = ''
for l in tb_lines:
if l[0:17] == ' File "<string>"':
err_msg += ' User Defined Function,'
err_msg += l[18:] +'\n'
else:
err_msg += l+'\n'
"""
messagebox.showinfo('Error when evaluating user defined function 1:', str(sys.exc_info()))  # print() returned None here; pass a string instead
self.fx = np.NAN
self.flagx = 0
if self.GetPlotParam('show_y'):
if not set(self.f2args).isdisjoint(self.parent.prtl_keys):
keyy = hash(self.GetPlotParam('cmdstr2')+str(self.parent.stride)+str(self.GetPlotParam('OneDOnly')[1]))
else:
keyy = hash(self.GetPlotParam('cmdstr2')+str(self.GetPlotParam('OneDOnly')[1]))
if keyy in self.parent.DataDict.keys():
self.fy = self.parent.DataDict[keyy]
if self.GetPlotParam('OneDOnly')[0]:
self.flagy = 1
else:
self.flagy = 2
else:
try:
tmpcstr = ''
for line in self.GetPlotParam('cmdstr2').splitlines():
tmpcstr += line[1:] +'\n'
tmpcstr += 'self.fy = FieldFunc(*[self.FigWrap.LoadKey(k) for k in self.f2args])'
eval(compile(tmpcstr, '<string>', 'exec'), locals(), locals())
self.parent.DataDict[keyy] = self.fy
if self.GetPlotParam('OneDOnly')[1]:
self.flagy = 1
else:
self.flagy = 2
except:
print(sys.exc_info())
"""
tb_lines = traceback.format_exc(sys.exc_info()[2]).splitlines()
tb_lines.pop(1)
tb_lines[1] = ''
err_msg = ''
for l in tb_lines:
if l[0:17] == ' File "<string>"':
err_msg += ' User Defined Function,'
err_msg += l[18:] +'\n'
else:
err_msg += l+'\n'
"""
messagebox.showinfo('Error when evaluating user defined function 2:', str(sys.exc_info()))  # print() returned None here; pass a string instead
self.fy = np.NAN
self.flagy = 0
if self.GetPlotParam('show_z'):
if not set(self.f3args).isdisjoint(self.parent.prtl_keys):
keyz = hash(self.GetPlotParam('cmdstr3')+str(self.parent.stride)+str(self.GetPlotParam('OneDOnly')[2]))
else:
keyz = hash(self.GetPlotParam('cmdstr3')+str(self.GetPlotParam('OneDOnly')[2]))
if keyz in self.parent.DataDict.keys():
self.fz = self.parent.DataDict[keyz]
if self.GetPlotParam('OneDOnly')[2]:
self.flagz = 1
else:
self.flagz = 2
else:
try:
tmpcstr = ''
for line in self.GetPlotParam('cmdstr3').splitlines():
tmpcstr += line[1:] +'\n'
tmpcstr += 'self.fz = FieldFunc(*[self.FigWrap.LoadKey(k) for k in self.f3args])'
eval(compile(tmpcstr, '<string>', 'exec'), locals(), locals())
self.parent.DataDict[keyz] = self.fz
if self.GetPlotParam('OneDOnly')[2]:
self.flagz = 1
else:
self.flagz = 2
except:
print(sys.exc_info())
"""
tb_lines = traceback.format_exc(sys.exc_info()[2]).splitlines()
tb_lines.pop(1)
tb_lines[1] = ''
err_msg = ''
for l in tb_lines:
if l[0:17] == ' File "<string>"':
err_msg += ' User Defined Function,'
err_msg += l[18:] +'\n'
else:
err_msg += l+'\n'
"""
messagebox.showinfo('Error when evaluating user defined function 3:', str(sys.exc_info()))  # print() returned None here; pass a string instead
self.fz = np.NAN
self.flagz = 0
def draw(self):
''' A function that draws the data. In the interest in speeding up the
code, draw should only be called when you want to recreate the whole
figure, i.e. it will be slow. Most times you will only want to update
what has changed in the figure. This will be done in a function called
refresh, that should be much much faster.'''
if self.GetPlotParam('OutlineText'):
self.annotate_kwargs = {'horizontalalignment': 'right',
'verticalalignment': 'top',
'size' : self.parent.MainParamDict['annotateTextSize'],
'path_effects' : [PathEffects.withStroke(linewidth=1.5,foreground="k")]
}
else:
self.annotate_kwargs = {'horizontalalignment' : 'right',
'verticalalignment' : 'top',
'size' : self.parent.MainParamDict['annotateTextSize']}
# Set the tick color
tick_color = 'black'
# Create a gridspec to handle spacing better
self.gs = gridspec.GridSpecFromSubplotSpec(100,100, subplot_spec = self.parent.gs0[self.FigWrap.pos])
# Now that the data is loaded, start making the plots
if self.GetPlotParam('twoD'):
if self.parent.MainParamDict['LinkSpatial'] != 0:
if self.FigWrap.pos == self.parent.first_x and self.FigWrap.pos == self.parent.first_y:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
elif self.FigWrap.pos == self.parent.first_x:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]],
sharey = self.parent.SubPlotList[self.parent.first_y[0]][self.parent.first_y[1]].graph.axes)
elif self.FigWrap.pos == self.parent.first_y:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]],
sharex = self.parent.SubPlotList[self.parent.first_x[0]][self.parent.first_x[1]].graph.axes)
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]],
sharex = self.parent.SubPlotList[self.parent.first_x[0]][self.parent.first_x[1]].graph.axes,
sharey = self.parent.SubPlotList[self.parent.first_y[0]][self.parent.first_y[1]].graph.axes)
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
# First choose the 'zval' to plot, we can only do one because it is 2-d.
self.plotFlag = -1
if self.GetPlotParam('show_x') and self.flagx == 2:
if self.parent.MainParamDict['2DSlicePlane'] == 0: # Show the x-y plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fx[self.parent.zSlice,:,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fx[self.parent.zSlice,:,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
elif self.parent.MainParamDict['2DSlicePlane'] == 1: # Show the x-z plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fx[:,self.parent.ySlice,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fx[:,self.parent.ySlice,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
self.plotFlag = 0
self.SetPlotParam('show_y', 0, update_plot = False)
self.SetPlotParam('show_z', 0, update_plot = False)
elif self.GetPlotParam('show_y') and self.flagy == 2:
if self.parent.MainParamDict['2DSlicePlane'] == 0: # Show the x-y plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fy[self.parent.zSlice,:,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fy[self.parent.zSlice,:,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
elif self.parent.MainParamDict['2DSlicePlane'] == 1: # Show the x-z plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fy[:,self.parent.ySlice,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fy[:,self.parent.ySlice,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
self.plotFlag = 1
self.SetPlotParam('show_x', 0, update_plot = False)
self.SetPlotParam('show_z', 0, update_plot = False)
elif self.GetPlotParam('show_z') and self.flagz == 2:
# make sure z is loaded, (something has to be)
# set the other plot values to zero in the PlotParams
if self.parent.MainParamDict['2DSlicePlane'] == 0: # Show the x-y plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fz[self.parent.zSlice,:,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fz[self.parent.zSlice,:,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
elif self.parent.MainParamDict['2DSlicePlane'] == 1: # Show the x-z plane
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(self.fz[:,self.parent.ySlice,:], norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(self.fz[:,self.parent.ySlice,:], origin = 'lower', norm = self.norm(),
aspect= 'auto')
self.plotFlag = 2
self.SetPlotParam('show_x', 0, update_plot = False)
self.SetPlotParam('show_y', 0, update_plot = False)
else:
if self.parent.MainParamDict['ImageAspect']:
self.cax = self.axes.imshow(np.ones([2,2]), norm = self.norm(), origin = 'lower')
else:
self.cax = self.axes.imshow(np.ones([2,2]), norm = self.norm(), origin = 'lower', aspect = 'auto')
self.cax.set_data(np.ma.masked_array(np.empty([2,2]), mask = np.ones([2,2])))
self.ymin = 0
self.ymax = self.cax.get_array().shape[0]/self.c_omp*self.istep
self.xmin = 0
self.xmax = self.cax.get_array().shape[1]/self.c_omp*self.istep
self.cax.set_cmap(new_cmaps.cmaps[self.cmap])
if self.plotFlag>=0:
self.vmin = self.cax.get_array().min()
if self.GetPlotParam('set_v_min'):
self.vmin = self.GetPlotParam('v_min')
self.vmax = self.cax.get_array().max()
if self.GetPlotParam('set_v_max'):
self.vmax = self.GetPlotParam('v_max')
if self.GetPlotParam('UseDivCmap') and self.GetPlotParam('stretch_colors'):
self.vmax = max(np.abs(self.vmin), self.vmax)
self.vmin = -self.vmax
self.cax.norm.vmin = self.vmin
self.cax.norm.vmax = self.vmax
self.cax.set_extent([self.xmin,self.xmax, self.ymin, self.ymax])
self.axes.add_artist(self.cax)
self.anntext =''
if self.plotFlag >= 0:
self.anntext = self.GetPlotParam('2D_label')[self.GetPlotParam('field_type')][self.plotFlag]
if self.GetPlotParam('field_type') ==0 and self.GetPlotParam('normalize_fields'):
self.anntext +=r'$/B_0$'
if self.GetPlotParam('field_type') ==1 and self.GetPlotParam('normalize_fields'):
self.anntext +=r'$/E_0$'
self.TwoDan = self.axes.annotate(self.anntext,
xy = (0.9,.9),
xycoords= 'axes fraction',
color = 'white',
**self.annotate_kwargs)
self.axC = self.figure.add_subplot(self.gs[self.parent.cbar_extent[0]:self.parent.cbar_extent[1], self.parent.cbar_extent[2]:self.parent.cbar_extent[3]])
self.parent.cbarList.append(self.axC)
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar = self.axC.imshow(self.gradient, aspect='auto',
cmap=new_cmaps.cmaps[self.cmap])
# Make the colorbar axis more like the real colorbar
self.cbar.set_extent([0, 1.0, 0, 1.0])
self.axC.tick_params(axis='x',
which = 'both', # both major and minor ticks
top = False, # turn off top ticks
labelsize=self.parent.MainParamDict['NumFontSize'])
self.axC.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelleft=False)
else:
self.cbar = self.axC.imshow(np.transpose(self.gradient)[::-1], aspect='auto',
cmap=new_cmaps.cmaps[self.cmap])
# Make the colorbar axis more like the real colorbar
self.cbar.set_extent([0, 1.0, 0, 1.0])
self.axC.tick_params(axis='x',
which = 'both', # both major and minor ticks
top = False, # turn off top ticks
bottom = False,
labelbottom = False,
labelsize=self.parent.MainParamDict['NumFontSize'])
self.axC.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
                                left=False,      # ticks along the left edge are off
                                right=True,         # ticks along the right edge are on
labelleft = False,
labelright = True,
labelsize=self.parent.MainParamDict['NumFontSize'])
if self.GetPlotParam('show_cbar') == 0 or self.plotFlag == -1:
self.axC.set_visible(False)
else:
self.CbarTickFormatter()
self.shockline_2d = self.axes.axvline(self.parent.shock_loc, linewidth = 1.5, linestyle = '--', color = self.parent.shock_color, path_effects=[PathEffects.Stroke(linewidth=2, foreground='k'),
PathEffects.Normal()])
self.shockline_2d.set_visible(self.GetPlotParam('show_shock'))
if int(matplotlib.__version__[0]) < 2:
self.axes.set_axis_bgcolor(self.GetPlotParam('face_color'))
else:
self.axes.set_facecolor(self.GetPlotParam('face_color'))
self.axes.tick_params(labelsize = self.parent.MainParamDict['NumFontSize'], color=tick_color)
if self.parent.MainParamDict['SetxLim']:
if self.parent.MainParamDict['xLimsRelative']:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'] + self.parent.shock_loc,
self.parent.MainParamDict['xRight'] + self.parent.shock_loc)
else:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'], self.parent.MainParamDict['xRight'])
else:
self.axes.set_xlim(self.xmin, self.xmax)
self.cax.set_interpolation(self.GetPlotParam('interpolation'))
if self.parent.MainParamDict['SetyLim']:
self.axes.set_ylim(self.parent.MainParamDict['yBottom'],self.parent.MainParamDict['yTop'])
else:
self.axes.set_ylim(self.ymin, self.ymax)
self.axes.set_xlabel(r'$x\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['xLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
if self.parent.MainParamDict['2DSlicePlane'] == 0:
self.axes.set_ylabel(r'$y\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
if self.parent.MainParamDict['2DSlicePlane'] == 1:
self.axes.set_ylabel(r'$z\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
else: # It's 1D
if self.parent.MainParamDict['LinkSpatial'] != 0 and self.parent.MainParamDict['LinkSpatial'] != 3:
if self.FigWrap.pos == self.parent.first_x:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]],
sharex = self.parent.SubPlotList[self.parent.first_x[0]][self.parent.first_x[1]].graph.axes)
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
self.annotate_pos = [0.8,0.9]
self.xmin, self.xmax = self.xaxis_values[0], self.xaxis_values[-1]
min_max = [np.inf, -np.inf]
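            # Track the combined y-range over every component that gets drawn;
            # each plotted line below widens min_max as needed.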
if self.flagx > 0 and self.GetPlotParam('show_x'):
if self.flagx == 1 and len(self.fx.shape) == 1:
self.linex = self.axes.plot(self.xaxis_values, self.fx, color = self.xcolor)
elif self.parent.MainParamDict['Average1D']:
self.linex = self.axes.plot(self.xaxis_values, np.average(self.fx.reshape(-1,self.fx.shape[-1]), axis =0), color = self.xcolor)
else:
self.linex = self.axes.plot(self.xaxis_values, self.fx[self.parent.zSlice,self.parent.ySlice,:], color = self.xcolor)
min_max[0]=min(min_max[0],self.linex[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.linex[0].get_data()[1].max())
else:
self.linex = self.axes.plot(np.arange(10), np.arange(10), color = self.xcolor)
self.linex[0].set_visible(False)
self.anx = self.axes.annotate(self.GetPlotParam('1D_label')[self.GetPlotParam('field_type')][0], xy = self.annotate_pos,
xycoords = 'axes fraction',
color = self.xcolor,
**self.annotate_kwargs)
self.anx.set_visible(self.GetPlotParam('show_x'))
self.annotate_pos[0] += .08
if self.flagy >0 and self.GetPlotParam('show_y'):
                if self.flagy == 1 and len(self.fy.shape) == 1:
self.liney = self.axes.plot(self.xaxis_values, self.fy, color = self.ycolor)
elif self.parent.MainParamDict['Average1D']:
self.liney = self.axes.plot(self.xaxis_values, np.average(self.fy.reshape(-1,self.fy.shape[-1]), axis = 0), color = self.ycolor)
else:
self.liney = self.axes.plot(self.xaxis_values, self.fy[self.parent.zSlice,self.parent.ySlice,:], color = self.ycolor)
min_max[0]=min(min_max[0],self.liney[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.liney[0].get_data()[1].max())
else:
self.liney = self.axes.plot(np.arange(10), np.arange(10), color = self.ycolor)
self.liney[0].set_visible(False)
self.any =self.axes.annotate(self.GetPlotParam('1D_label')[self.GetPlotParam('field_type')][1], xy = self.annotate_pos,
xycoords= 'axes fraction',
color = self.ycolor,
**self.annotate_kwargs)
self.any.set_visible(self.GetPlotParam('show_y'))
self.annotate_pos[0] += .08
if self.flagz and self.GetPlotParam('show_z'):
                if self.flagz == 1 and len(self.fz.shape) == 1:
                    self.linez = self.axes.plot(self.xaxis_values, self.fz, color = self.zcolor)
                elif self.parent.MainParamDict['Average1D']:
self.linez = self.axes.plot(self.xaxis_values, np.average(self.fz.reshape(-1,self.fz.shape[-1]), axis = 0), color = self.zcolor)
else: # In the x-y plane
self.linez = self.axes.plot(self.xaxis_values, self.fz[self.parent.zSlice,self.parent.ySlice,:], color = self.zcolor)
min_max[0]=min(min_max[0],self.linez[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.linez[0].get_data()[1].max())
else:
self.linez = self.axes.plot(np.arange(10), np.arange(10), color = self.zcolor)
self.linez[0].set_visible(False)
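            # Pad the combined y-range by 4% on each side; if nothing was
            # plotted, min_max is still [inf, -inf], so both limits are left
            # as None and matplotlib keeps its defaults.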
if np.isinf(min_max[0]):
min_max[0]=None
min_max[1]=None
else:
dist = min_max[1]-min_max[0]
min_max[0] -= 0.04*dist
min_max[1] += 0.04*dist
if self.GetPlotParam('stretch_colors'):
tmp = max(abs(min_max[0]), abs(min_max[1]))
min_max = [-tmp, tmp]
self.axes.set_ylim(min_max)
self.anz = self.axes.annotate(self.GetPlotParam('1D_label')[self.GetPlotParam('field_type')][2], xy = self.annotate_pos,
xycoords= 'axes fraction',
color = self.zcolor,
**self.annotate_kwargs
)
self.anz.set_visible(self.GetPlotParam('show_z'))
self.shock_line = self.axes.axvline(self.parent.shock_loc, linewidth = 1.5, linestyle = '--', color = self.parent.shock_color, path_effects=[PathEffects.Stroke(linewidth=2, foreground='k'),
PathEffects.Normal()])
self.shock_line.set_visible(self.GetPlotParam('show_shock'))
if int(matplotlib.__version__[0]) < 2:
self.axes.set_axis_bgcolor(self.GetPlotParam('face_color'))
else:
self.axes.set_facecolor(self.GetPlotParam('face_color'))
self.axes.tick_params(labelsize = self.parent.MainParamDict['NumFontSize'], color=tick_color)
if self.parent.MainParamDict['SetxLim']:
if self.parent.MainParamDict['xLimsRelative']:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'] + self.parent.shock_loc,
self.parent.MainParamDict['xRight'] + self.parent.shock_loc)
else:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'], self.parent.MainParamDict['xRight'])
else:
self.axes.set_xlim(self.xaxis_values[0],self.xaxis_values[-1])
if self.GetPlotParam('set_v_min'):
self.axes.set_ylim(bottom = self.GetPlotParam('v_min'))
if self.GetPlotParam('set_v_max'):
self.axes.set_ylim(top = self.GetPlotParam('v_max'))
self.axes.set_xlabel(r'$x\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['xLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
tmplblstr = self.GetPlotParam('yaxis_label')[self.GetPlotParam('field_type')]
if self.GetPlotParam('normalize_fields'):
if self.GetPlotParam('field_type') ==0:
tmplblstr +=r'$/B_0$'
elif self.GetPlotParam('field_type') ==1:
tmplblstr +=r'$/E_0$'
self.axes.set_ylabel(tmplblstr, labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
####
# FFT REGION PLOTTING CODE
####
self.lineleft = self.axes.axvline(0, linewidth = 1.5, linestyle = ':', color = self.parent.FFT_color)
self.lineright = self.axes.axvline(0, linewidth = 1.5, linestyle = ':', color = self.parent.FFT_color)
self.lineleft.set_visible(self.GetPlotParam('show_FFT_region'))
self.lineright.set_visible(self.GetPlotParam('show_FFT_region'))
if self.GetPlotParam('show_FFT_region'):
self.left_loc = self.parent.MainParamDict['FFTLeft'] + self.parent.shock_loc*self.parent.MainParamDict['FFTRelative']
self.left_loc = max(self.left_loc, self.xmin)
self.lineleft.set_xdata([self.left_loc,self.left_loc])
self.right_loc = self.parent.MainParamDict['FFTRight'] + self.parent.shock_loc*self.parent.MainParamDict['FFTRelative']
self.right_loc = min(self.right_loc, self.xmax)
self.lineright.set_xdata([self.right_loc,self.right_loc])
####
#
# Code to show the CPU domains
#
####
if self.GetPlotParam('show_cpu_domains'):
self.FigWrap.SetCpuDomainLines()
def refresh(self):
        '''This is a function that will be called only if self.axes already
        holds a fields type plot. We only update things that have changed & are
        shown. If something hasn't changed or isn't shown, don't touch it. The
        difference between this and last time is that we won't actually do any
        drawing in the plot. The plot will be redrawn after all subplots are
        refreshed. '''
# Main goal, only change what is showing..
self.xmin, self.xmax = self.xaxis_values[0], self.xaxis_values[-1]
self.lineleft.set_visible(self.GetPlotParam('show_FFT_region'))
self.lineright.set_visible(self.GetPlotParam('show_FFT_region'))
if self.GetPlotParam('show_FFT_region'):
# Update the position of the FFT region
self.left_loc = self.parent.MainParamDict['FFTLeft'] + self.parent.shock_loc*self.parent.MainParamDict['FFTRelative']
self.left_loc = max(self.left_loc, self.xmin)
self.lineleft.set_xdata([self.left_loc,self.left_loc])
self.right_loc = self.parent.MainParamDict['FFTRight'] + self.parent.shock_loc*self.parent.MainParamDict['FFTRelative']
self.right_loc = min(self.right_loc, self.xmax)
self.lineright.set_xdata([self.right_loc,self.right_loc])
# Now do the 1D plots, because it is simpler
if self.GetPlotParam('twoD') == 0:
min_max = [np.inf, -np.inf]
if self.GetPlotParam('show_x') and self.flagx:
if self.flagx == 1 and len(self.fx.shape) == 1:
self.linex[0].set_data(self.xaxis_values, self.fx)
elif self.parent.MainParamDict['Average1D']:
self.linex[0].set_data(self.xaxis_values, np.average(self.fx.reshape(-1,self.fx.shape[-1]), axis =0))
else: # In the x-y plane
self.linex[0].set_data(self.xaxis_values, self.fx[self.parent.zSlice,self.parent.ySlice,:])
self.linex[0].set_visible(True)
self.anx.set_visible(True)
min_max[0]=min(min_max[0],self.linex[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.linex[0].get_data()[1].max())
if self.GetPlotParam('show_y') and self.flagy:
if self.flagy == 1 and len(self.fy.shape) == 1:
self.liney[0].set_data(self.xaxis_values, self.fy)
elif self.parent.MainParamDict['Average1D']:
self.liney[0].set_data(self.xaxis_values, np.average(self.fy.reshape(-1,self.fy.shape[-1]), axis =0))
else:
self.liney[0].set_data(self.xaxis_values, self.fy[self.parent.zSlice,self.parent.ySlice,:])
self.liney[0].set_visible(True)
self.any.set_visible(True)
min_max[0]=min(min_max[0],self.liney[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.liney[0].get_data()[1].max())
            if self.GetPlotParam('show_z') and self.flagz:
if self.flagz ==1 and len(self.fz.shape) == 1:
self.linez[0].set_data(self.xaxis_values, self.fz)
elif self.parent.MainParamDict['Average1D']:
self.linez[0].set_data(self.xaxis_values, np.average(self.fz.reshape(-1,self.fz.shape[-1]), axis =0))
else:
self.linez[0].set_data(self.xaxis_values, self.fz[self.parent.zSlice,self.parent.ySlice,:])
self.linez[0].set_visible(True)
self.anz.set_visible(True)
min_max[0]=min(min_max[0],self.linez[0].get_data()[1].min())
min_max[1]=max(min_max[1],self.linez[0].get_data()[1].max())
if np.isinf(min_max[0]):
min_max[0]=None
min_max[1]=None
else:
dist = min_max[1]-min_max[0]
min_max[0] -= 0.04*dist
min_max[1] += 0.04*dist
if self.GetPlotParam('stretch_colors'):
tmp = max(abs(min_max[0]), abs(min_max[1]))
min_max = [-tmp, tmp]
self.axes.set_ylim(min_max)
if self.GetPlotParam('show_shock'):
self.shock_line.set_xdata([self.parent.shock_loc,self.parent.shock_loc])
if self.parent.MainParamDict['SetxLim']:
if self.parent.MainParamDict['xLimsRelative']:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'] + self.parent.shock_loc,
self.parent.MainParamDict['xRight'] + self.parent.shock_loc)
else:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'], self.parent.MainParamDict['xRight'])
else:
self.axes.set_xlim(self.xaxis_values[0], self.xaxis_values[-1])
if self.GetPlotParam('set_v_min'):
self.axes.set_ylim(bottom = self.GetPlotParam('v_min'))
if self.GetPlotParam('set_v_max'):
self.axes.set_ylim(top = self.GetPlotParam('v_max'))
else: # Now refresh the plot if it is 2D
self.plotFlag = -1
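            # plotFlag records which component the 2D image shows:
            # 0 -> x, 1 -> y, 2 -> z, and -1 -> nothing to plot.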
if self.GetPlotParam('show_x') and self.flagx >1:
self.plotFlag = 0
if self.parent.MainParamDict['2DSlicePlane'] == 0: #x-y plane
self.cax.set_data(self.fx[self.parent.zSlice,:,:])
elif self.parent.MainParamDict['2DSlicePlane'] == 1: #x-z plane
self.cax.set_data(self.fx[:,self.parent.ySlice,:])
elif self.GetPlotParam('show_y') and self.flagy >1:
self.plotFlag = 1
if self.parent.MainParamDict['2DSlicePlane'] == 0: #x-y plane
self.cax.set_data(self.fy[self.parent.zSlice,:,:])
elif self.parent.MainParamDict['2DSlicePlane'] == 1: #x-z plane
self.cax.set_data(self.fy[:,self.parent.ySlice,:])
elif self.GetPlotParam('show_z') and self.flagz>1:
self.plotFlag = 2
if self.parent.MainParamDict['2DSlicePlane'] == 0: #x-y plane
self.cax.set_data(self.fz[self.parent.zSlice,:,:])
elif self.parent.MainParamDict['2DSlicePlane'] == 1: #x-z plane
self.cax.set_data(self.fz[:,self.parent.ySlice,:])
else:
self.cax.set_data(np.ma.masked_array(np.empty([2,2]), mask = np.ones([2,2])))
self.clims = [None, None]
self.axC.set_visible(self.plotFlag !=-1)
if self.plotFlag != -1:
self.ymin = 0
self.ymax = self.cax.get_array().shape[0]/self.c_omp*self.istep
self.xmin = 0
self.xmax = self.xaxis_values[-1]
self.clims = [self.cax.get_array().min(), self.cax.get_array().max()]
if self.parent.MainParamDict['SetxLim']:
if self.parent.MainParamDict['xLimsRelative']:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'] + self.parent.shock_loc,
self.parent.MainParamDict['xRight'] + self.parent.shock_loc)
else:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'], self.parent.MainParamDict['xRight'])
else:
self.axes.set_xlim(self.xmin,self.xmax)
if self.parent.MainParamDict['SetyLim']:
self.axes.set_ylim(self.parent.MainParamDict['yBottom'],self.parent.MainParamDict['yTop'])
else:
self.axes.set_ylim(self.ymin,self.ymax)
self.cax.set_extent([self.xmin, self.xmax, self.ymin, self.ymax])
if self.plotFlag >= 0:
self.anntext = self.GetPlotParam('2D_label')[self.GetPlotParam('field_type')][self.plotFlag]
if self.GetPlotParam('field_type') ==0 and self.GetPlotParam('normalize_fields'):
self.anntext +=r'$/B_0$'
if self.GetPlotParam('field_type') ==1 and self.GetPlotParam('normalize_fields'):
self.anntext +=r'$/E_0$'
self.TwoDan.set_text(self.anntext)
else:
self.TwoDan.set_text('')
self.vmin = self.cax.get_array().min()
if self.GetPlotParam('set_v_min'):
self.vmin = self.GetPlotParam('v_min')
self.vmax = self.cax.get_array().max()
if self.GetPlotParam('set_v_max'):
self.vmax = self.GetPlotParam('v_max')
if self.GetPlotParam('UseDivCmap') and self.GetPlotParam('stretch_colors'):
self.vmax = max(np.abs(self.vmin), self.vmax)
self.vmin = -self.vmax
self.cax.norm.vmin = self.vmin
self.cax.norm.vmax = self.vmax
self.CbarTickFormatter()
if self.GetPlotParam('show_shock'):
self.shockline_2d.set_xdata([self.parent.shock_loc,self.parent.shock_loc])
if self.parent.MainParamDict['2DSlicePlane'] == 0:
self.axes.set_ylabel(r'$y\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
if self.parent.MainParamDict['2DSlicePlane'] == 1:
self.axes.set_ylabel(r'$z\ [c/\omega_{\rm pe}]$', labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
if self.GetPlotParam('show_cpu_domains'):
self.FigWrap.UpdateCpuDomainLines()
def CbarTickFormatter(self):
        ''' A helper function that sets the cbar ticks & labels. This used to be
        easier, but because I am no longer using the colorbar class I have to do
        stuff manually.'''
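        # The "colorbar" here is a static gradient image (self.gradient) drawn
        # with imshow; its extent is mapped onto the data's clim, so the
        # ordinary axis ticks of axC double as the colorbar labels.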
clim = np.copy(self.cax.get_clim())
if self.GetPlotParam('show_cbar'):
if self.GetPlotParam('cnorm_type') == "Log":
self.cbar.set_extent([np.log10(clim[0]),np.log10(clim[1]),0,1])
self.axC.set_xlim(np.log10(clim[0]),np.log10(clim[1]))
elif self.GetPlotParam('cnorm_type') == "Pow":
# re-create the gradient with the data values
# First make a colorbar in the negative region that is linear in the pow_space
data_range = np.linspace(clim[0],clim[1],512)
cbardata = PowerNormFunc(data_range, vmin = data_range[0], vmax = data_range[-1], gamma = self.GetPlotParam('cpow_num'), midpoint = self.GetPlotParam('div_midpoint'), div_cmap = self.GetPlotParam('UseDivCmap'), stretch_colors = self.GetPlotParam('stretch_colors'))
cbardata = np.vstack((cbardata,cbardata))
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar.set_data(cbardata)
self.cbar.set_extent([clim[0],clim[1],0,1])
self.axC.set_xlim(clim[0],clim[1])
else:
self.cbar.set_data(np.transpose(cbardata)[::-1])
self.cbar.set_extent([0,1,clim[0],clim[1]])
self.axC.set_ylim(clim[0],clim[1])
self.axC.locator_params(axis='y',nbins=6)
elif self.GetPlotParam('cnorm_type') == "Linear" and self.GetPlotParam('UseDivCmap'):
# re-create the gradient with the data values
# First make a colorbar in the negative region that is linear in the pow_space
data_range = np.linspace(clim[0],clim[1],512)
cbardata = PowerNormFunc(data_range, vmin = data_range[0], vmax = data_range[-1], gamma = 1.0, div_cmap = self.GetPlotParam('UseDivCmap'), midpoint = self.GetPlotParam('div_midpoint'), stretch_colors = self.GetPlotParam('stretch_colors'))
cbardata = np.vstack((cbardata,cbardata))
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar.set_data(cbardata)
self.cbar.set_extent([clim[0],clim[1],0,1])
self.axC.set_xlim(clim[0],clim[1])
else:
self.cbar.set_data(np.transpose(cbardata)[::-1])
self.cbar.set_extent([0,1,clim[0],clim[1]])
self.axC.set_ylim(clim[0],clim[1])
self.axC.locator_params(axis='y',nbins=6)
else:# self.GetPlotParam('cnorm_type') == "Linear":
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar.set_extent([clim[0],clim[1],0,1])
self.axC.set_xlim(clim[0],clim[1])
else:
self.cbar.set_extent([0,1,clim[0],clim[1]])
self.axC.set_ylim(clim[0],clim[1])
self.axC.locator_params(axis='y',nbins=6)
def GetPlotParam(self, keyname):
return self.FigWrap.GetPlotParam(keyname)
def SetPlotParam(self, keyname, value, update_plot = True, NeedsRedraw = False):
self.FigWrap.SetPlotParam(keyname, value, update_plot = update_plot, NeedsRedraw = NeedsRedraw)
def OpenSettings(self):
if self.settings_window is None:
self.settings_window = FieldSettings(self)
else:
self.settings_window.destroy()
self.settings_window = FieldSettings(self)
class FieldSettings(Tk.Toplevel):
def __init__(self, parent):
self.parent = parent
Tk.Toplevel.__init__(self)
self.def1_window = None
self.def2_window = None
self.def3_window = None
self.wm_title('Fields & Currents Plot (%d,%d) Settings' % self.parent.FigWrap.pos)
self.parent = parent
self.frm = ttk.Frame(self)
self.frm.pack(fill=Tk.BOTH, expand=True)
self.protocol('WM_DELETE_WINDOW', self.OnClosing)
self.bind('<Return>', self.TxtEnter)
# Create the OptionMenu to chooses the Chart Type:
self.InterpolVar = Tk.StringVar(self)
self.InterpolVar.set(self.parent.GetPlotParam('interpolation')) # default value
self.InterpolVar.trace('w', self.InterpolChanged)
ttk.Label(self.frm, text="Interpolation Method:").grid(row=0, column = 2)
InterplChooser = ttk.OptionMenu(self.frm, self.InterpolVar, self.parent.GetPlotParam('interpolation'), *tuple(self.parent.InterpolationMethods))
InterplChooser.grid(row =0, column = 3, sticky = Tk.W + Tk.E)
# Create the OptionMenu to chooses the Chart Type:
self.ctypevar = Tk.StringVar(self)
self.ctypevar.set(self.parent.chartType) # default value
self.ctypevar.trace('w', self.ctypeChanged)
ttk.Label(self.frm, text="Choose Chart Type:").grid(row=0, column = 0)
ctypeChooser = ttk.OptionMenu(self.frm, self.ctypevar, self.parent.chartType, *tuple(self.parent.ChartTypes))
ctypeChooser.grid(row =0, column = 1, sticky = Tk.W + Tk.E)
self.TwoDVar = Tk.IntVar(self) # Create a var to track whether or not to plot in 2-D
self.TwoDVar.set(self.parent.GetPlotParam('twoD'))
cb = ttk.Checkbutton(self.frm, text = "Show in 2-D",
variable = self.TwoDVar,
command = self.Change2d)
cb.grid(row = 1, sticky = Tk.W)
# the Radiobox Control to choose the Field Type
self.FieldList = ['B Field', 'E field', 'J [current]', 'User Defined']
self.FieldTypeVar = Tk.IntVar()
self.FieldTypeVar.set(self.parent.GetPlotParam('field_type'))
ttk.Label(self.frm, text='Choose Field:').grid(row = 2, sticky = Tk.W)
for i in range(len(self.FieldList)):
ttk.Radiobutton(self.frm,
text=self.FieldList[i],
variable=self.FieldTypeVar,
command = self.RadioField,
value=i).grid(row = 3+i, sticky =Tk.W)
# the Check boxes for the dimension
self.label = ttk.Label(self.frm, text='Dimension:')
self.label.grid(row = 2, column = 1, sticky = Tk.W)
self.ShowXVar = Tk.IntVar(self) # Create a var to track whether or not to show X
self.ShowXVar.set(self.parent.GetPlotParam('show_x'))
self.cbx = ttk.Checkbutton(self.frm, text = "Show x",
variable = self.ShowXVar,
command = self.Selector)
self.cbx.grid(row = 3, column = 1, sticky = Tk.W)
self.ShowYVar = Tk.IntVar(self) # Create a var to track whether or not to plot Y
self.ShowYVar.set(self.parent.GetPlotParam('show_y'))
self.cby = ttk.Checkbutton(self.frm, text = "Show y",
variable = self.ShowYVar,
command = self.Selector)
self.cby.grid(row = 4, column = 1, sticky = Tk.W)
self.ShowZVar = Tk.IntVar(self) # Create a var to track whether or not to plot Z
self.ShowZVar.set(self.parent.GetPlotParam('show_z'))
self.cbz = ttk.Checkbutton(self.frm, text = "Show z",
variable = self.ShowZVar,
command = self.Selector)
self.cbz.grid(row = 5, column = 1, sticky = Tk.W)
if self.FieldTypeVar.get()==3:
# ADD BUTTONS TO DEFINE THE FUNCTIONS
self.df1button = ttk.Button(self.frm, text = 'Def F1', command = self.OpenDef1)
self.df1button.grid(row =3, column =2)
self.df2button = ttk.Button(self.frm, text = 'Def F2', command = self.OpenDef2)
self.df2button.grid(row =4, column =2)
self.df3button = ttk.Button(self.frm, text = 'Def F3', command = self.OpenDef3)
self.df3button.grid(row =5, column =2)
# CHANGE LABELS
self.cbx.config(text='Show F1')
self.cby.config(text='Show F2')
self.cbz.config(text='Show F3')
self.label.config(text='Choose Function:')
# Control whether or not Cbar is shown
self.CbarVar = Tk.IntVar()
self.CbarVar.set(self.parent.GetPlotParam('show_cbar'))
cb = ttk.Checkbutton(self.frm, text = "Show Color bar",
variable = self.CbarVar,
command = self.CbarHandler)
cb.grid(row = 7, sticky = Tk.W)
# Control whether or not diverging cmap is used
self.DivVar = Tk.IntVar()
self.DivVar.set(self.parent.GetPlotParam('UseDivCmap'))
cb = ttk.Checkbutton(self.frm, text = "Use Diverging Cmap",
variable = self.DivVar,
command = self.DivHandler)
cb.grid(row = 8, sticky = Tk.W)
# Use full div cmap
self.StretchVar = Tk.IntVar()
self.StretchVar.set(self.parent.GetPlotParam('stretch_colors'))
cb = ttk.Checkbutton(self.frm, text = "Symmetric about zero",
variable = self.StretchVar,
command = self.StretchHandler)
cb.grid(row = 8, column = 1, sticky = Tk.W)
# Create the OptionMenu to chooses the cnorm_type:
self.cnormvar = Tk.StringVar(self)
        self.cnormvar.set(self.parent.GetPlotParam('cnorm_type')) # default value
self.cnormvar.trace('w', self.cnormChanged)
ttk.Label(self.frm, text="Choose Color Norm:").grid(row=6, column = 3)
cnormChooser = ttk.OptionMenu(self.frm, self.cnormvar, self.parent.GetPlotParam('cnorm_type'), *tuple(['Pow', 'Linear']))
cnormChooser.grid(row =6, column = 4, sticky = Tk.W + Tk.E)
# Now the gamma of the pow norm
self.powGamma = Tk.StringVar()
self.powGamma.set(str(self.parent.GetPlotParam('cpow_num')))
ttk.Label(self.frm, text ='gamma =').grid(row = 7, column = 3, sticky =Tk.E)
ttk.Label(self.frm, text ='If cnorm is Pow =>').grid(row = 8, column = 3,columnspan = 2, sticky =Tk.N)
ttk.Label(self.frm, text ='sign(data)*|data|**gamma').grid(row = 9, column = 3,columnspan = 2, sticky =Tk.E)
self.GammaEnter = ttk.Entry(self.frm, textvariable=self.powGamma, width=7)
self.GammaEnter.grid(row = 7, column = 4)
# Now the field lim
self.setZminVar = Tk.IntVar()
self.setZminVar.set(self.parent.GetPlotParam('set_v_min'))
self.setZminVar.trace('w', self.setZminChanged)
self.setZmaxVar = Tk.IntVar()
self.setZmaxVar.set(self.parent.GetPlotParam('set_v_max'))
self.setZmaxVar.trace('w', self.setZmaxChanged)
self.Zmin = Tk.StringVar()
self.Zmin.set(str(self.parent.GetPlotParam('v_min')))
self.Zmax = Tk.StringVar()
self.Zmax.set(str(self.parent.GetPlotParam('v_max')))
cb = ttk.Checkbutton(self.frm, text ='Set B or E min',
variable = self.setZminVar)
cb.grid(row = 3, column = 3, sticky = Tk.W)
self.ZminEnter = ttk.Entry(self.frm, textvariable=self.Zmin, width=7)
self.ZminEnter.grid(row = 3, column = 4)
cb = ttk.Checkbutton(self.frm, text ='Set B or E max',
variable = self.setZmaxVar)
cb.grid(row = 4, column = 3, sticky = Tk.W)
self.ZmaxEnter = ttk.Entry(self.frm, textvariable=self.Zmax, width=7)
self.ZmaxEnter.grid(row = 4, column = 4)
self.ShockVar = Tk.IntVar()
self.ShockVar.set(self.parent.GetPlotParam('show_shock'))
cb = ttk.Checkbutton(self.frm, text = "Show Shock",
variable = self.ShockVar,
command = self.ShockVarHandler)
cb.grid(row = 9, column = 1, sticky = Tk.W)
self.FFTVar = Tk.IntVar()
self.FFTVar.set(self.parent.GetPlotParam('show_FFT_region'))
cb = ttk.Checkbutton(self.frm, text = "Show FFT Region",
variable = self.FFTVar,
command = self.FFTVarHandler)
cb.grid(row = 9, column = 0, sticky = Tk.W)
self.CPUVar = Tk.IntVar()
self.CPUVar.set(self.parent.GetPlotParam('show_cpu_domains'))
cb = ttk.Checkbutton(self.frm, text = "Show CPU domains",
variable = self.CPUVar,
command = self.CPUVarHandler)
cb.grid(row = 10, column = 0, sticky = Tk.W)
self.NormFieldVar = Tk.IntVar()
self.NormFieldVar.set(self.parent.GetPlotParam('normalize_fields'))
cb = ttk.Checkbutton(self.frm, text = "Normalize Fields",
variable = self.NormFieldVar,
command = self.NormFieldHandler)
cb.grid(row = 7, column = 1, sticky = Tk.W)
def CbarHandler(self, *args):
if self.parent.GetPlotParam('show_cbar')== self.CbarVar.get():
pass
else:
if self.parent.GetPlotParam('twoD'):
self.parent.axC.set_visible(self.CbarVar.get())
self.parent.SetPlotParam('show_cbar', self.CbarVar.get(), update_plot = self.parent.GetPlotParam('twoD'))
def DivHandler(self, *args):
if self.parent.GetPlotParam('UseDivCmap')== self.DivVar.get():
pass
elif self.parent.GetPlotParam('twoD'):
self.parent.SetPlotParam('UseDivCmap', self.DivVar.get(), NeedsRedraw = True)
else:
self.parent.SetPlotParam('UseDivCmap', self.DivVar.get(), update_plot = False)
def StretchHandler(self, *args):
if self.parent.GetPlotParam('stretch_colors') == self.StretchVar.get():
pass
elif self.parent.GetPlotParam('twoD'):
self.parent.SetPlotParam('stretch_colors', self.StretchVar.get(), NeedsRedraw = True)
else:
self.parent.SetPlotParam('stretch_colors', self.StretchVar.get(), update_plot = True)
def cnormChanged(self, *args):
if self.parent.GetPlotParam('cnorm_type') == self.cnormvar.get():
pass
elif self.parent.GetPlotParam('twoD'):
self.parent.SetPlotParam('cnorm_type', self.cnormvar.get(), NeedsRedraw = True)
else:
self.parent.SetPlotParam('cnorm_type', self.cnormvar.get(), update_plot = False)
def OpenDef1(self):
if self.def1_window is None:
self.def1_window = UserDefSettings(self, self.parent,1)
else:
self.def1_window.destroy()
self.def1_window = UserDefSettings(self, self.parent,1)
def OpenDef2(self):
if self.def2_window is None:
self.def2_window = UserDefSettings(self, self.parent,2)
else:
self.def2_window.destroy()
self.def2_window = UserDefSettings(self, self.parent,2)
def OpenDef3(self):
if self.def3_window is None:
self.def3_window = UserDefSettings(self, self.parent,3)
else:
self.def3_window.destroy()
self.def3_window = UserDefSettings(self, self.parent,3)
def ShockVarHandler(self, *args):
if self.parent.GetPlotParam('show_shock')== self.ShockVar.get():
pass
else:
if self.parent.GetPlotParam('twoD'):
self.parent.shockline_2d.set_visible(self.ShockVar.get())
else:
self.parent.shock_line.set_visible(self.ShockVar.get())
self.parent.SetPlotParam('show_shock', self.ShockVar.get())
def FFTVarHandler(self, *args):
if self.parent.GetPlotParam('show_FFT_region')== self.FFTVar.get():
pass
else:
self.parent.SetPlotParam('show_FFT_region', self.FFTVar.get(), update_plot = False)
self.parent.lineleft.set_visible(self.parent.GetPlotParam('show_FFT_region'))
self.parent.lineright.set_visible(self.parent.GetPlotParam('show_FFT_region'))
### The .parent.parent is less than ideal.... consider re-writing.
if self.parent.GetPlotParam('show_FFT_region'):
self.parent.left_loc = self.parent.parent.MainParamDict['FFTLeft'] + self.parent.parent.shock_loc*self.parent.parent.MainParamDict['FFTRelative']
self.parent.left_loc = max(self.parent.left_loc, self.parent.xmin)
self.parent.lineleft.set_xdata([self.parent.left_loc,self.parent.left_loc])
self.parent.right_loc = self.parent.parent.MainParamDict['FFTRight'] + self.parent.parent.shock_loc*self.parent.parent.MainParamDict['FFTRelative']
self.parent.right_loc = min(self.parent.right_loc, self.parent.xmax)
self.parent.lineright.set_xdata([self.parent.right_loc,self.parent.right_loc])
self.parent.parent.canvas.draw()
self.parent.parent.canvas.get_tk_widget().update_idletasks()
def CPUVarHandler(self, *args):
if self.parent.GetPlotParam('show_cpu_domains')== self.CPUVar.get():
pass
else:
self.parent.SetPlotParam('show_cpu_domains', self.CPUVar.get(), update_plot = False)
if self.parent.GetPlotParam('show_cpu_domains'):
self.parent.FigWrap.SetCpuDomainLines()
            else: # We need to get rid of the cpu lines. Pop them out of the array and remove them from the list.
self.parent.FigWrap.RemoveCpuDomainLines()
self.parent.parent.canvas.draw()
self.parent.parent.canvas.get_tk_widget().update_idletasks()
def NormFieldHandler(self, *args):
if self.parent.GetPlotParam('normalize_fields') == self.NormFieldVar.get():
pass
else:
            if not self.parent.GetPlotParam('twoD'):
tmplblstr = self.parent.GetPlotParam('yaxis_label')[self.FieldTypeVar.get()]
if self.NormFieldVar.get():
if self.parent.GetPlotParam('field_type') ==0:
tmplblstr +=r'$/B_0$'
elif self.parent.GetPlotParam('field_type') ==1:
tmplblstr +=r'$/E_0$'
self.parent.axes.set_ylabel(tmplblstr, labelpad = self.parent.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.parent.MainParamDict['AxLabelSize'])
self.parent.SetPlotParam('normalize_fields', self.NormFieldVar.get())
def Change2d(self):
if self.TwoDVar.get() == self.parent.GetPlotParam('twoD'):
pass
else:
if self.TwoDVar.get():
# Make sure only one dimension checked
if self.parent.GetPlotParam('show_x'):
self.ShowYVar.set(0)
self.ShowZVar.set(0)
elif self.parent.GetPlotParam('show_y'):
self.ShowZVar.set(0)
self.parent.SetPlotParam('spatial_y', self.TwoDVar.get(), update_plot=False)
self.parent.SetPlotParam('twoD', self.TwoDVar.get())
def ctypeChanged(self, *args):
if self.ctypevar.get() == self.parent.chartType:
pass
else:
self.parent.ChangePlotType(self.ctypevar.get())
self.destroy()
def InterpolChanged(self, *args):
if self.InterpolVar.get() == self.parent.GetPlotParam('interpolation'):
pass
else:
if self.parent.GetPlotParam('twoD'):
self.parent.cax.set_interpolation(self.InterpolVar.get())
self.parent.SetPlotParam('interpolation', self.InterpolVar.get())
def setZminChanged(self, *args):
if self.setZminVar.get() == self.parent.GetPlotParam('set_v_min'):
pass
else:
self.parent.SetPlotParam('set_v_min', self.setZminVar.get())
def setZmaxChanged(self, *args):
if self.setZmaxVar.get() == self.parent.GetPlotParam('set_v_max'):
pass
else:
self.parent.SetPlotParam('set_v_max', self.setZmaxVar.get())
def RadioField(self):
if self.FieldTypeVar.get() == self.parent.GetPlotParam('field_type'):
pass
else:
if not self.parent.GetPlotParam('twoD'):
tmplblstr = self.parent.GetPlotParam('yaxis_label')[self.FieldTypeVar.get()]
if self.parent.GetPlotParam('normalize_fields'):
if self.FieldTypeVar.get() ==0:
tmplblstr +=r'$/B_0$'
elif self.FieldTypeVar.get() ==1:
tmplblstr +=r'$/E_0$'
self.parent.axes.set_ylabel(tmplblstr, labelpad = self.parent.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.parent.MainParamDict['AxLabelSize'])
self.parent.anx.set_text(self.parent.GetPlotParam('1D_label')[self.FieldTypeVar.get()][0])
self.parent.any.set_text(self.parent.GetPlotParam('1D_label')[self.FieldTypeVar.get()][1])
self.parent.anz.set_text(self.parent.GetPlotParam('1D_label')[self.FieldTypeVar.get()][2])
####
#
# Plot to add UserDef fields
#
#####
if self.FieldTypeVar.get()==3:
# ADD BUTTONS TO DEFINE THE FUNCTIONS
self.df1button = ttk.Button(self.frm, text = 'Def F1', command = self.OpenDef1)
self.df1button.grid(row =3, column =2)
self.df2button = ttk.Button(self.frm, text = 'Def F2', command = self.OpenDef2)
self.df2button.grid(row =4, column =2)
self.df3button = ttk.Button(self.frm, text = 'Def F3', command = self.OpenDef3)
self.df3button.grid(row =5, column =2)
# CHANGE THE LABELS OF ALL THE CHECKBUTTONS
self.label.config(text='Choose Function:')
self.cbx.config(text='Show F1')
self.cby.config(text='Show F2')
self.cbz.config(text='Show F3')
# TURN OFF ALL THE LINES
self.ShowXVar.set(False)
self.ShowYVar.set(False)
self.ShowZVar.set(False)
self.parent.SetPlotParam('show_x', False, update_plot = False)
self.parent.SetPlotParam('show_y', False, update_plot = False)
self.parent.SetPlotParam('show_z', False, update_plot = False)
                if not self.parent.GetPlotParam('twoD'):
self.parent.linex[0].set_visible(False)
self.parent.anx.set_visible(False)
self.parent.liney[0].set_visible(False)
self.parent.any.set_visible(False)
self.parent.linez[0].set_visible(False)
self.parent.anz.set_visible(False)
elif self.parent.GetPlotParam('field_type') ==3 :
### DESTROY THE buttons
self.df1button.destroy()
self.df2button.destroy()
self.df3button.destroy()
self.label.config(text='Dimension:')
self.cbx.config(text='Show x')
self.cby.config(text='Show y')
self.cbz.config(text='Show z')
self.ShowXVar.set(False)
self.ShowYVar.set(False)
self.ShowZVar.set(False)
self.parent.SetPlotParam('show_x', False, update_plot = False)
self.parent.SetPlotParam('show_y', False, update_plot = False)
self.parent.SetPlotParam('show_z', False, update_plot = False)
                if not self.parent.GetPlotParam('twoD'):
self.parent.linex[0].set_visible(False)
self.parent.anx.set_visible(False)
self.parent.liney[0].set_visible(False)
self.parent.any.set_visible(False)
self.parent.linez[0].set_visible(False)
self.parent.anz.set_visible(False)
self.parent.SetPlotParam('field_type', self.FieldTypeVar.get())
def Selector(self):
# First check if it is 2-D:
if self.parent.GetPlotParam('twoD'):
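            # In 2-D only one component can be shown at a time, so the
            # checkboxes act like radio buttons: checking one unchecks the
            # other two and clears their plot params.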
#if self.ShowXVar.get() == 0 and self.ShowYVar.get() == 0 and self.ShowZVar.get() == 0:
# # All are zero, something must be selected for this plot
# self.ShowXVar.set(1)
if self.parent.GetPlotParam('show_x') != self.ShowXVar.get():
# set the other plot values to zero in the PlotParams
self.parent.SetPlotParam('show_y', 0, update_plot = False)
self.parent.SetPlotParam('show_z', 0, update_plot = False)
# Uncheck the boxes
self.ShowYVar.set(self.parent.GetPlotParam('show_y'))
self.ShowZVar.set(self.parent.GetPlotParam('show_z'))
self.parent.SetPlotParam('show_x', self.ShowXVar.get())
elif self.parent.GetPlotParam('show_y') != self.ShowYVar.get():
# set the other plot values to zero in the PlotParams
self.parent.SetPlotParam('show_x', 0 ,update_plot = False)
self.parent.SetPlotParam('show_z', 0 ,update_plot = False)
# Uncheck the boxes
self.ShowXVar.set(self.parent.GetPlotParam('show_x'))
self.ShowZVar.set(self.parent.GetPlotParam('show_z'))
self.parent.SetPlotParam('show_y', self.ShowYVar.get())
elif self.parent.GetPlotParam('show_z') != self.ShowZVar.get():
# set the other plot values to zero in the PlotParams
self.parent.SetPlotParam('show_x', 0 ,update_plot = False)
self.parent.SetPlotParam('show_y', 0 ,update_plot = False)
# Uncheck the boxes
self.ShowXVar.set(self.parent.GetPlotParam('show_x'))
self.ShowYVar.set(self.parent.GetPlotParam('show_y'))
self.parent.SetPlotParam('show_z', self.ShowZVar.get())
else:
if self.parent.GetPlotParam('show_x') != self.ShowXVar.get():
self.parent.linex[0].set_visible(self.ShowXVar.get())
self.parent.anx.set_visible(self.ShowXVar.get())
self.parent.SetPlotParam('show_x', self.ShowXVar.get())
elif self.parent.GetPlotParam('show_y') != self.ShowYVar.get():
self.parent.liney[0].set_visible(self.ShowYVar.get())
self.parent.any.set_visible(self.ShowYVar.get())
self.parent.SetPlotParam('show_y', self.ShowYVar.get())
elif self.parent.GetPlotParam('show_z') != self.ShowZVar.get():
self.parent.linez[0].set_visible(self.ShowZVar.get())
self.parent.anz.set_visible(self.ShowZVar.get())
self.parent.SetPlotParam('show_z', self.ShowZVar.get())
def TxtEnter(self, e):
self.FieldsCallback()
self.GammaCallback()
def GammaCallback(self):
try:
#make sure the user types in a float
if np.abs(float(self.powGamma.get()) - self.parent.GetPlotParam('cpow_num')) > 1E-4:
if self.parent.GetPlotParam('twoD') and self.parent.GetPlotParam('cnorm_type')=='Pow':
self.parent.SetPlotParam('cpow_num', float(self.powGamma.get()), NeedsRedraw = True)
else:
self.parent.SetPlotParam('cpow_num', float(self.powGamma.get()), update_plot = False)
except ValueError:
            #if they type in random stuff, just set it to the param value
self.powGamma.set(str(self.parent.GetPlotParam('cpow_num')))
def FieldsCallback(self):
tkvarLimList = [self.Zmin, self.Zmax]
plot_param_List = ['v_min', 'v_max']
tkvarSetList = [self.setZminVar, self.setZmaxVar]
to_reload = False
try:
#make sure the user types in a float, no longer check to see if it has changed, because of precision issues.
self.parent.SetPlotParam('v_min', float(self.Zmin.get()), update_plot = False)
if self.parent.GetPlotParam('set_v_min'):
if self.parent.GetPlotParam('twoD'):
self.parent.cax.norm.vmin = self.parent.GetPlotParam('v_min')
else:
self.parent.axes.set_ylim(bottom = self.parent.GetPlotParam('v_min') )
except ValueError:
            #if they type in random stuff, just set it to the param value
self.Zmin.set(str(self.parent.GetPlotParam('v_min')))
try:
#make sure the user types in a float, no longer check to see if it has changed, because of precision issues.
self.parent.SetPlotParam('v_max', float(self.Zmax.get()), update_plot = False)
if self.parent.GetPlotParam('set_v_max'):
if self.parent.GetPlotParam('twoD'):
self.parent.cax.norm.vmax = self.parent.GetPlotParam('v_max')
else:
self.parent.axes.set_ylim(top = self.parent.GetPlotParam('v_max') )
except ValueError:
            #if they type in random stuff, just set it to the param value
self.Zmax.set(str(self.parent.GetPlotParam('v_max')))
self.parent.SetPlotParam('v_max', self.parent.GetPlotParam('v_max'))
def OnClosing(self):
self.parent.settings_window = None
self.destroy()
class UserDefSettings(Tk.Toplevel):
def __init__(self, parent, subplot, fnum):
self.parent = parent
self.subplot = subplot
self.fnum = fnum
Tk.Toplevel.__init__(self)
        self.wm_title('Define Function %d' % fnum)
self.parent = parent
S = Tk.Scrollbar(self)
self.T = Tk.Text(self, height=25, width=100)
S.pack(side=Tk.RIGHT, fill=Tk.Y)
self.T.pack(side=Tk.TOP, fill=Tk.Y)
S.config(command=self.T.yview)
self.T.config(yscrollcommand=S.set)
tmpstr = ''
for line in self.subplot.GetPlotParam('cmdstr'+str(self.fnum)).splitlines():
tmpstr += line[1:] +'\n'
self.T.insert(Tk.END, tmpstr)
miniframe = ttk.Frame(self)
ttk.Label(miniframe, text ="1D y-label:").grid(row=0, column =2)
self.ylabel = Tk.StringVar()
self.ylabel.set(self.subplot.GetPlotParam('yaxis_label')[self.subplot.GetPlotParam('field_type')])
ttk.Entry(miniframe, textvariable=self.ylabel, width=15).grid(row = 0, column = 3)
ttk.Label(miniframe, text ="1D label:").grid(row=0, column =0)
self.oneDlabel = Tk.StringVar()
self.oneDlabel.set(self.subplot.GetPlotParam('1D_label')[3][self.fnum-1])
ttk.Entry(miniframe, textvariable=self.oneDlabel, width=15).grid(row = 0, column = 1)
ttk.Label(miniframe, text ="2D label:").grid(row=1, column =0)
self.twoDlabel = Tk.StringVar()
self.twoDlabel.set(self.subplot.GetPlotParam('2D_label')[3][self.fnum-1])
ttk.Entry(miniframe, textvariable=self.twoDlabel, width=15).grid(row = 1, column = 1)
self.OneDVar = Tk.IntVar()
self.OneDVar.set(self.subplot.GetPlotParam('OneDOnly')[self.fnum-1])
ttk.Checkbutton(miniframe, text = 'Returns a 1D array along x', variable = self.OneDVar).grid(row = 1, column = 2, columnspan= 2, sticky = Tk.W)
miniframe.pack(side=Tk.TOP)
ttk.Button(self, text = 'Save F'+str(self.fnum), command = self.SaveStr).pack(side =Tk.TOP)
def SaveStr(self):
tmpstr = ''
for line in self.T.get(1.0, Tk.END).splitlines():
tmpstr += '#' + line + '\n'
self.subplot.SetPlotParam('cmdstr'+str(self.fnum), tmpstr, update_plot=False)
### THIS IS SLOPPY!
self.subplot.SetPlotParam('yaxis_label',self.subplot.GetPlotParam('yaxis_label')[0:3]+ [self.ylabel.get()], update_plot =False)
tmplist = list(self.subplot.GetPlotParam('2D_label')[3])
tmplist[self.fnum-1] = self.twoDlabel.get()
tmplist2 = list(self.subplot.GetPlotParam('2D_label')[0:3])
tmplist2.append(tmplist)
self.subplot.SetPlotParam('2D_label',tmplist2, update_plot =False)
tmplist = self.subplot.GetPlotParam('1D_label')[3]
tmplist[self.fnum-1] = self.oneDlabel.get()
tmplist2 = list(self.subplot.GetPlotParam('1D_label')[0:3])
tmplist2.append(tmplist)
self.subplot.SetPlotParam('1D_label',tmplist2, update_plot =False)
        if not self.subplot.GetPlotParam('twoD'):
self.subplot.axes.set_ylabel(self.subplot.GetPlotParam('yaxis_label')[3])
self.subplot.anx.set_text(self.subplot.GetPlotParam('1D_label')[3][0])
self.subplot.any.set_text(self.subplot.GetPlotParam('1D_label')[3][1])
self.subplot.anz.set_text(self.subplot.GetPlotParam('1D_label')[3][2])
#self.subplot.GetPlotParam('1D_label')[self.subplot.GetPlotParam('field_type')][self.fnum-1] = self.oneDlabel.get()
#self.subplot.GetPlotParam('2D_label')[self.subplot.GetPlotParam('field_type')][self.fnum-1] = self.twoDlabel.get()
self.subplot.GetPlotParam('OneDOnly')[self.fnum -1] = self.OneDVar.get()
if self.fnum ==1:
self.subplot.SetPlotParam('show_x', True)
self.parent.ShowXVar.set(True)
if self.fnum ==2:
self.subplot.SetPlotParam('show_y', True)
self.parent.ShowYVar.set(True)
if self.fnum ==3:
self.subplot.SetPlotParam('show_z', True)
self.parent.ShowZVar.set(True)
self.OnClosing()
def OnClosing(self):
if self.fnum ==1:
self.parent.def1_window = None
if self.fnum ==2:
self.parent.def2_window = None
else:
self.parent.def3_window = None
self.destroy()
|
pcrumley/Iseult
|
src/fields_plots.py
|
Python
|
gpl-3.0
| 84,898
|
[
"Gaussian"
] |
e4eb32a9aed2e23d094feae6272cdfabb3c89134be13ffde91e6f8f6ee7795a6
|
from neuron import h
import numpy as np
import matplotlib.pyplot as plt
def fetch_soma_sec(section_name):
cell_model = 'Hayton.hoc'
h.load_file(cell_model)
cell = h.L5PC
soma = cell.soma[0]
    sec = eval('cell.' + section_name)  # eval, not exec: exec would not bind a local name under Python 3
return soma, sec
def find_vrest(h, section_name):
h.load_file("stdrun.hoc")
tstop = 100
h.dt = dt = 0.1
soma, sec = fetch_soma_sec(section_name)
h.init()
h.cvode.re_init()
t_vec, soma_vm, sec_vm = record(soma, sec)
h.execute('tstop = 100')
h.run()
vrest = np.array(sec_vm)[-1]
return vrest
def exp2(tt, tau_raise, tau_fall, onset):
vv = []
for ii,t in enumerate(tt):
if t < onset:
vv.append(0)
else:
            val = np.exp(-(t - onset)/tau_fall)*(1. - np.exp(-(t - onset)/tau_raise))  # kernel starts rising at onset
vv.append(val)
return np.array(vv)
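# A small analytic note on exp2: with the onset-shifted double exponential,
# the peak sits at t = onset + tau_raise*log(1 + tau_fall/tau_raise), and its
# amplitude is below 1, so v_peak scales the kernel rather than setting the
# exact maximum of the command waveform.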
def square(tt, start, end, v_peak):
    print('Square impulse')
vv = []
for ii, t in enumerate(tt):
if t<start or t>end:
vv.append(0.)
else:
vv.append(v_peak)
return np.array(vv)
def voltage_clamp(tstop, dt, v_rest, v_peak, tau_raise, tau_fall, onset=100.):
assert(tau_fall > tau_raise)
tt = np.arange(0, tstop, dt)
#vv = v_rest + (v_peak*exp2(tt, tau_raise, tau_fall, onset))
vv = v_rest + square(tt, 100, 200, v_peak)
return vv
def fetch_soma_apic_pots():
s_v = h.Vector()
a_v = h.Vector()
s_v.record(h.L5PC.soma[0](0.5)._ref_v)
a_v.record(h.L5PC.apic[0](1e-3)._ref_v)
return s_v, a_v
def record(soma, sec):
sec_vm = h.Vector()
sec_vm.record(sec(0.5)._ref_v)
soma_vm = h.Vector()
soma_vm.record(soma(0.5)._ref_v)
t_vec = h.Vector()
t_vec.record(h._ref_t)
return t_vec, soma_vm, sec_vm
def run_sim(h, section_name, v_peak, tau_raise, tau_fall, onset=100):
tstop = 500
h.dt = dt = 0.1
h.load_file("stdrun.hoc")
soma, sec = fetch_soma_sec(section_name)
v_rest = -75.711 # find_vrest(h, section_name)
h.init()
h.cvode.re_init()
s_v, a_v = fetch_soma_apic_pots()
vv = voltage_clamp(tstop, dt, v_rest, v_peak, tau_raise, tau_fall, onset)
vc = h.SEClamp(sec(0.5))
vc.rs = 0.001
vc.dur1 = tstop
vamp = h.Vector(vv)
vamp.play(vc._ref_amp1, h.dt)
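    # Vector.play streams the pre-computed command waveform into the clamp's
    # amp1 once per dt; with rs = 0.001 MOhm the series resistance is tiny,
    # so the clamp holds the section essentially at the commanded voltage.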
t_vec, soma_vm, sec_vm = record(soma, sec)
h.execute('tstop = ' + str(tstop))
h.run()
diff_v = np.array(a_v) - np.array(s_v)
return t_vec, soma_vm, sec_vm, diff_v, vv
dendrite_name = 'apic[26]'
t_vec, soma_vm, sec_vm, diff_v, vv = run_sim(h, dendrite_name, 100, 10, 15)
plt.figure(figsize=(10,5))
plt.subplot(121)
plt.plot(t_vec, soma_vm, 'k', label='V_soma')
plt.plot(t_vec, sec_vm, 'r', label='V_'+dendrite_name)
# tt = np.arange(0, 500, 0.01)
# plt.plot(np.arange(0, tstop, dt), vv, c='g')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential (mV)')
plt.legend()
plt.subplot(122)
# h.ri(1e-3) # after access of apic[0] to get the 0.13930698946266598 - neuron blues
plt.plot(t_vec, diff_v / 0.13930698946266598, 'g', label='apical 2 soma')
plt.xlabel('Time (ms)')
plt.ylabel('Axial current (nA)')
plt.legend()
plt.show()
|
ccluri/L5Pyr
|
agnesnrn.py
|
Python
|
gpl-3.0
| 3,121
|
[
"NEURON"
] |
fff076e74d08eef0c5de544549e1bfcdf1b024fde112323138687a20811cadc0
|
import numpy as np
# import FitsUtils
import FittingUtilities
import HelperFunctions
import matplotlib.pyplot as plt
import sys
import os
from astropy import units
from astropy.io import fits, ascii
import DataStructures
from scipy.interpolate import InterpolatedUnivariateSpline as interp
import MakeModel
import HelperFunctions
from collections import Counter
from sklearn.gaussian_process import GaussianProcess
import warnings
def SmoothData(order, windowsize=91, smoothorder=5, lowreject=3, highreject=3, numiters=10, expand=0, normalize=True):
denoised = HelperFunctions.Denoise(order.copy())
denoised.y = FittingUtilities.Iterative_SV(denoised.y, windowsize, smoothorder, lowreject=lowreject,
highreject=highreject, numiters=numiters, expand=expand)
if normalize:
denoised.y /= denoised.y.max()
return denoised
def roundodd(num):
rounded = round(num)
if rounded % 2 != 0:
return rounded
else:
if rounded > num:
return rounded - 1
else:
return rounded + 1
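# roundodd always returns an odd window size, rounding toward the nearest odd
# integer, e.g. roundodd(4.2) -> 5.0 and roundodd(5.6) -> 5.0 (Python 2 round
# returns a float); the Savitzky-Golay-style filter used in SmoothData needs
# an odd window length.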
def GPSmooth(data, low=0.1, high=10, debug=False, findoutliers=True):
"""
This will smooth the data using Gaussian processes. It will find the best
smoothing parameter via cross-validation to be between the low and high.
The low and high keywords are reasonable bounds for A and B stars with
vsini > 100 km/s.
"""
smoothed = data.copy()
# First, find outliers by doing a guess smooth
if findoutliers:
smoothed = SmoothData(data, normalize=False)
temp = smoothed.copy()
temp.y = data.y / smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=3, expand=5)
if len(outliers) > 0:
data.y[outliers] = smoothed.y[outliers]
gp = GaussianProcess(corr='squared_exponential',
theta0=np.sqrt(low * high),
thetaL=low,
thetaU=high,
normalize=False,
nugget=(data.err / data.y) ** 2)
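    # The nugget adds the per-pixel relative variance to the diagonal of the
    # covariance matrix, so noisier pixels constrain the GP fit less.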
try:
gp.fit(data.x[:, None], data.y)
except ValueError:
#On some orders with large telluric residuals, this will fail.
# Just fall back to the old smoothing method in that case.
return SmoothData(data), 91
if debug:
print "\tSmoothing parameter theta = ", gp.theta_, gp.theta0, gp.thetaL, gp.thetaU
smoothed.y, smoothed.err = gp.predict(data.x[:, None], eval_MSE=True)
return smoothed, gp.theta_[0][0]
if __name__ == "__main__":
fileList = []
plot = False
vsini_file = "%s/School/Research/Useful_Datafiles/Vsini.csv" % (os.environ["HOME"])
for arg in sys.argv[1:]:
if "-p" in arg:
plot = True
elif "-vsini" in arg:
vsini_file = arg.split("=")[-1]
else:
fileList.append(arg)
#Read in the vsini table
vsini_data = ascii.read(vsini_file)[10:]
if len(fileList) == 0:
fileList = [f for f in os.listdir("./") if f.endswith("telluric_corrected.fits")]
for fname in fileList:
orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum",
errors="error")
#Find the vsini of this star
header = fits.getheader(fname)
starname = header["object"]
found = False
for data in vsini_data:
if data[0] == starname:
vsini = float(data[1])
found = True
        if not found:
            outfile = open("Warnings.log", "a")
            outfile.write("Cannot find %s in the vsini data: %s\n" % (starname, vsini_file))
            outfile.close()
            warnings.warn("Cannot find %s in the vsini data: %s" % (starname, vsini_file))
            continue  # vsini was never set; skip this file rather than crash below
print starname, vsini
#Begin looping over the orders
column_list = []
header_list = []
for i, order in enumerate(orders):
print "Smoothing order %i/%i" % (i + 1, len(orders))
#Fix errors
order.err[order.err > 1e8] = np.sqrt(order.y[order.err > 1e8])
#Linearize
xgrid = np.linspace(order.x[0], order.x[-1], order.x.size)
order = FittingUtilities.RebinData(order, xgrid)
dx = order.x[1] - order.x[0]
smooth_factor = 0.8
theta = roundodd(vsini / 3e5 * order.x.mean() / dx * smooth_factor)
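            # vsini/c (c ~ 3e5 km/s) is the fractional Doppler width of the
            # rotational broadening; multiplying by the central wavelength and
            # dividing by the pixel spacing dx converts it to pixels, and
            # smooth_factor shrinks the window slightly before rounding to an
            # odd size.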
denoised = SmoothData(order,
windowsize=theta,
smoothorder=3,
lowreject=3,
highreject=3,
expand=10,
numiters=10)
#denoised, theta = GPSmooth(order.copy())
#denoised, theta = CrossValidation(order.copy(), 5, 2, 2, 10)
#denoised, theta = OptimalSmooth(order.copy())
#denoised.y *= order.cont/order.cont.mean()
print "Window size = %.4f nm" % theta
column = {"wavelength": denoised.x,
"flux": order.y / denoised.y,
"continuum": denoised.cont,
"error": denoised.err}
header_list.append((("Smoother", theta, "Smoothing Parameter"),))
column_list.append(column)
if plot:
plt.figure(1)
plt.plot(order.x, order.y / order.y.mean())
plt.plot(denoised.x, denoised.y / denoised.y.mean())
plt.title(starname)
plt.figure(2)
plt.plot(order.x, order.y / denoised.y)
plt.title(starname)
#plt.plot(order.x, (order.y-denoised.y)/np.median(order.y))
#plt.show()
if plot:
plt.show()
outfilename = "%s_smoothed.fits" % (fname.split(".fits")[0])
print "Outputting to %s" % outfilename
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new', headers_info=header_list)
|
kgullikson88/TS23-Scripts
|
Smooth.py
|
Python
|
gpl-3.0
| 6,248
|
[
"Gaussian"
] |
7b35e0bfb87abfc6769a5a29407a856f918989efcfedaa23e89aadbcbd3a9d12
|
from dateutil.relativedelta import relativedelta
from edc_constants.constants import SCREENED
from edc_registration.models import RegisteredSubject
from edc_identifier.models import SubjectIdentifier
from edc_constants.constants import FAILED_ELIGIBILITY, OFF_STUDY, SCHEDULED
from edc_meta_data.models import RequisitionMetaData
from edc_appointment.models import Appointment
from td_maternal.models import MaternalVisit
from td_maternal.tests import BaseTestCase
from td_maternal.tests.factories import (MaternalUltraSoundIniFactory, MaternalEligibilityFactory,
MaternalConsentFactory, AntenatalEnrollmentFactory,
AntenatalVisitMembershipFactory, MaternalLabourDelFactory)
from .factories import InfantBirthFactory
class TestInfantBirthMembership(BaseTestCase):
def setUp(self):
super(TestInfantBirthMembership, self).setUp()
self.maternal_eligibility = MaternalEligibilityFactory()
self.maternal_consent = MaternalConsentFactory(registered_subject=self.maternal_eligibility.registered_subject)
self.registered_subject = self.maternal_consent.registered_subject
# maternal visit created here.
self.antenatal_enrollment = AntenatalEnrollmentFactory(registered_subject=self.registered_subject)
self.maternal_visit = MaternalVisit.objects.get(
appointment__registered_subject=self.registered_subject,
reason=SCHEDULED,
appointment__visit_definition__code='1000M')
self.maternal_ultrasound = MaternalUltraSoundIniFactory(maternal_visit=self.maternal_visit,
number_of_gestations=1)
self.maternal_visits_membership = AntenatalVisitMembershipFactory(registered_subject=self.registered_subject)
self.maternal_labour_del = MaternalLabourDelFactory(registered_subject=self.registered_subject,
live_infants_to_register=1)
def test_create_appointments(self):
infant_birth = InfantBirthFactory(
maternal_labour_del=self.maternal_labour_del,
registered_subject=RegisteredSubject.objects.get(
relative_identifier=self.maternal_consent.subject_identifier))
self.assertEqual(Appointment.objects.filter(
registered_subject=RegisteredSubject.objects.get(
relative_identifier=self.maternal_consent.subject_identifier)).count(), 6)
|
TshepangRas/tshilo-dikotla
|
td_infant/tests/test_infant_birth_membership.py
|
Python
|
gpl-2.0
| 2,538
|
[
"VisIt"
] |
a49d0b2ce2e4693dfec63c1eae6302e3bb37bfd6a0723f6747c1363a461e121d
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import numpy as np
import pandas as pd
from copy import copy
from scipy.signal import correlate
import lsst.utils.tests
import lsst.pipe.base as pipeBase
from lsst.afw import image as afwImage
from lsst.daf import butler as dafButler
from lsst.ts.wep.task.EstimateZernikesCwfsTask import (
EstimateZernikesCwfsTask,
EstimateZernikesCwfsTaskConfig,
)
from lsst.ts.wep.Utility import (
getModulePath,
runProgram,
DefocalType,
writePipetaskCmd,
writeCleanUpRepoCmd,
)
class TestEstimateZernikesCwfsTask(lsst.utils.tests.TestCase):
@classmethod
def setUpClass(cls):
"""
Run the pipeline only once since it takes a
couple minutes with the ISR.
"""
moduleDir = getModulePath()
testDataDir = os.path.join(moduleDir, "tests", "testData")
testPipelineConfigDir = os.path.join(testDataDir, "pipelineConfigs")
cls.repoDir = os.path.join(testDataDir, "gen3TestRepo")
cls.runName = "run2"
# The visit number for the test data
cls.visitNum = 4021123106000
# Check that run doesn't already exist due to previous improper cleanup
butler = dafButler.Butler(cls.repoDir)
registry = butler.registry
collectionsList = list(registry.queryCollections())
if cls.runName in collectionsList:
cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName)
runProgram(cleanUpCmd)
# Point to the collections for the reference catalogs,
# the raw images and the camera model in the calib directory
# that comes from `butler write-curated-calibrations`.
cls.collections = "refcats,LSSTCam/calib,LSSTCam/raw/all"
cls.instrument = "lsst.obs.lsst.LsstCam"
cls.cameraName = "LSSTCam"
cls.pipelineYaml = os.path.join(testPipelineConfigDir, "testCwfsPipeline.yaml")
pipeCmd = writePipetaskCmd(
cls.repoDir,
cls.runName,
cls.instrument,
cls.collections,
pipelineYaml=cls.pipelineYaml,
)
pipeCmd += f" -d 'exposure IN ({cls.visitNum})'"
runProgram(pipeCmd)
@classmethod
def tearDownClass(cls):
cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName)
runProgram(cleanUpCmd)
def setUp(self):
self.config = EstimateZernikesCwfsTaskConfig()
self.task = EstimateZernikesCwfsTask(config=self.config)
self.butler = dafButler.Butler(self.repoDir)
self.registry = self.butler.registry
self.dataIdExtra = {
"instrument": "LSSTCam",
"detector": 191,
"exposure": self.visitNum,
"visit": self.visitNum,
}
self.dataIdIntra = {
"instrument": "LSSTCam",
"detector": 192,
"exposure": self.visitNum,
"visit": self.visitNum,
}
self.testRunName = "testTaskRun"
self.collectionsList = list(self.registry.queryCollections())
if self.testRunName in self.collectionsList:
cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, self.testRunName)
runProgram(cleanUpCmd)
def tearDown(self):
# Get Butler with updated registry
self.butler = dafButler.Butler(self.repoDir)
self.registry = self.butler.registry
self.collectionsList = list(self.registry.queryCollections())
if self.testRunName in self.collectionsList:
cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, self.testRunName)
runProgram(cleanUpCmd)
def _generateTestExposures(self):
# Generate donut template
template = self.task.getTemplate("R00_SW0", DefocalType.Extra)
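        # Note: the autocorrelation of the template peaks where the template
        # overlaps itself exactly, so the argmax below recovers the donut
        # center within the template.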
correlatedImage = correlate(template, template)
maxIdx = np.argmax(correlatedImage)
maxLoc = np.unravel_index(maxIdx, np.shape(correlatedImage))
templateCenter = np.array(maxLoc) - self.task.donutTemplateSize / 2
# Make donut centered in exposure
initCutoutSize = (
self.task.donutTemplateSize + self.task.initialCutoutPadding * 2
)
centeredArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32)
centeredArr[
self.task.initialCutoutPadding : -self.task.initialCutoutPadding,
self.task.initialCutoutPadding : -self.task.initialCutoutPadding,
] += template
centeredImage = afwImage.ImageF(initCutoutSize, initCutoutSize)
centeredImage.array = centeredArr
centeredExp = afwImage.ExposureF(initCutoutSize, initCutoutSize)
centeredExp.setImage(centeredImage)
centerCoord = (
self.task.initialCutoutPadding + templateCenter[1],
self.task.initialCutoutPadding + templateCenter[0],
)
# Make new donut that needs to be shifted by 20 pixels
# from the edge of the exposure
offCenterArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32)
offCenterArr[
: self.task.donutTemplateSize - 20, : self.task.donutTemplateSize - 20
] = template[20:, 20:]
offCenterImage = afwImage.ImageF(initCutoutSize, initCutoutSize)
offCenterImage.array = offCenterArr
offCenterExp = afwImage.ExposureF(initCutoutSize, initCutoutSize)
offCenterExp.setImage(offCenterImage)
# Center coord value 20 pixels closer than template center
# due to stamp overrunning the edge of the exposure.
offCenterCoord = templateCenter - 20
return centeredExp, centerCoord, template, offCenterExp, offCenterCoord
def _getDataFromButler(self):
# Grab two exposures from the same visits of adjacent detectors
exposureExtra = self.butler.get(
"postISRCCD", dataId=self.dataIdExtra, collections=[self.runName]
)
exposureIntra = self.butler.get(
"postISRCCD", dataId=self.dataIdIntra, collections=[self.runName]
)
# Get the donut catalogs for each detector
donutCatalogExtra = self.butler.get(
"donutCatalog", dataId=self.dataIdExtra, collections=[self.runName]
)
donutCatalogIntra = self.butler.get(
"donutCatalog", dataId=self.dataIdIntra, collections=[self.runName]
)
# Get the camera from the butler
camera = self.butler.get(
"camera",
dataId={"instrument": "LSSTCam"},
collections="LSSTCam/calib/unbounded",
)
return (
exposureExtra,
exposureIntra,
donutCatalogExtra,
donutCatalogIntra,
camera,
)
def testValidateConfigs(self):
self.config.donutTemplateSize = 120
self.config.donutStampSize = 120
self.config.initialCutoutPadding = 290
self.task = EstimateZernikesCwfsTask(config=self.config)
self.assertEqual(self.task.donutTemplateSize, 120)
self.assertEqual(self.task.donutStampSize, 120)
self.assertEqual(self.task.initialCutoutPadding, 290)
def testRunQuantum(self):
# Set up test quantum from butler data
inputRefs = pipeBase.InputQuantizedConnection()
badInstrument = "LSSTComCam"
inputRefs.exposures = [
dafButler.DatasetRef(
self.registry.getDatasetType("postISRCCD"),
{
"instrument": badInstrument,
"detector": 191,
"exposure": 4021123106000,
},
id="3104de33-c107-4678-b07e-1fc62407a52e",
run="run2",
)
]
inputRefs.camera = self.butler.getDeferred(
"camera", instrument="LSSTComCam", collections="LSSTComCam/calib/unbounded"
).ref
outputRefs = pipeBase.OutputQuantizedConnection()
quantum = dafButler.Quantum(
inputs={
inputRefs.exposures[0].datasetType: inputRefs.exposures,
inputRefs.camera.datasetType: [inputRefs.camera],
}
)
butlerQC = pipeBase.ButlerQuantumContext(self.butler, quantum)
# Test that we will get an error if we try to use an
# unsupported instrument.
errMsg = f"{badInstrument} is not a valid camera name."
with self.assertRaises(ValueError, msg=errMsg) as context:
self.task.runQuantum(butlerQC, inputRefs, outputRefs)
self.assertEqual(
f"{badInstrument} is not a valid camera name.",
str(context.exception),
)
# Test error raised when list sizes are unequal
inputRefs.exposures = [
self.butler.getDeferred(
"postISRCCD", dataId=self.dataIdExtra, collections=[self.runName]
).ref,
]
inputRefs.camera = self.butler.getDeferred(
"camera", instrument="LSSTCam", collections="LSSTCam/calib/unbounded"
).ref
inputRefs.donutCatalog = [
self.butler.getDeferred(
"donutCatalog", dataId=self.dataIdExtra, collections=[self.runName]
).ref,
self.butler.getDeferred(
"donutCatalog", dataId=self.dataIdIntra, collections=[self.runName]
).ref,
]
outputRefs = pipeBase.OutputQuantizedConnection()
quantum = dafButler.Quantum(
inputs={
inputRefs.exposures[0].datasetType: inputRefs.exposures,
inputRefs.donutCatalog[0].datasetType: inputRefs.donutCatalog,
inputRefs.camera.datasetType: [inputRefs.camera],
}
)
butlerQC = pipeBase.ButlerQuantumContext(self.butler, quantum)
unequalMsg = "Unequal number of intra and extra focal detectors."
with self.assertRaises(ValueError) as context:
self.task.runQuantum(butlerQC, inputRefs, outputRefs)
self.assertEqual(str(context.exception), unequalMsg)
# Test error raised when extra and intra focal do not
# have correct partner
self.mismatchDataId = copy(self.dataIdIntra)
self.mismatchDataId["detector"] = 196
# Test errors raised
inputRefs.exposures.append(
self.butler.getDeferred(
"postISRCCD", dataId=self.mismatchDataId, collections=[self.runName]
).ref
)
butlerQC = pipeBase.ButlerQuantumContext(self.butler, quantum)
mismatchMsg = "Intra and extra focal detectors not adjacent."
with self.assertRaises(ValueError) as context:
self.task.runQuantum(butlerQC, inputRefs, outputRefs)
self.assertEqual(str(context.exception), mismatchMsg)
def testTaskRunNoSources(self):
(
exposureExtra,
exposureIntra,
donutCatalogExtra,
donutCatalogIntra,
camera,
) = self._getDataFromButler()
# Test return values when no sources in catalog
noSrcDonutCatalog = pd.DataFrame(columns=donutCatalogExtra.columns)
testOutNoSrc = self.task.run(
[exposureExtra, exposureIntra], [noSrcDonutCatalog] * 2, camera
)
np.testing.assert_array_equal(
testOutNoSrc.outputZernikesRaw, np.ones(19) * np.nan
)
np.testing.assert_array_equal(
testOutNoSrc.outputZernikesAvg, np.ones(19) * np.nan
)
self.assertEqual(len(testOutNoSrc.donutStampsExtra), 0)
self.assertEqual(len(testOutNoSrc.donutStampsIntra), 0)
# Test no intra sources in catalog
testOutNoIntra = self.task.run(
[exposureExtra, exposureIntra],
[
donutCatalogExtra,
pd.DataFrame(columns=donutCatalogExtra.columns),
],
camera,
)
np.testing.assert_array_equal(
testOutNoIntra.outputZernikesRaw, np.ones(19) * np.nan
)
np.testing.assert_array_equal(
testOutNoIntra.outputZernikesAvg, np.ones(19) * np.nan
)
self.assertEqual(len(testOutNoIntra.donutStampsExtra), 0)
self.assertEqual(len(testOutNoIntra.donutStampsIntra), 0)
# Test no extra sources in catalog
testOutNoExtra = self.task.run(
[exposureExtra, exposureIntra],
[
pd.DataFrame(columns=donutCatalogIntra.columns),
donutCatalogIntra,
],
camera,
)
np.testing.assert_array_equal(
testOutNoExtra.outputZernikesRaw, np.ones(19) * np.nan
)
np.testing.assert_array_equal(
testOutNoExtra.outputZernikesAvg, np.ones(19) * np.nan
)
self.assertEqual(len(testOutNoExtra.donutStampsExtra), 0)
self.assertEqual(len(testOutNoExtra.donutStampsIntra), 0)
def testTaskRunNormal(self):
(
exposureExtra,
exposureIntra,
donutCatalogExtra,
donutCatalogIntra,
camera,
) = self._getDataFromButler()
# Test normal behavior
taskOut = self.task.run(
[exposureIntra, exposureExtra],
[donutCatalogExtra, donutCatalogIntra],
camera,
)
testExtraStamps = self.task.cutOutStamps(
exposureExtra, donutCatalogExtra, DefocalType.Extra, camera.getName()
)
testIntraStamps = self.task.cutOutStamps(
exposureIntra, donutCatalogIntra, DefocalType.Intra, camera.getName()
)
for donutStamp, cutOutStamp in zip(taskOut.donutStampsExtra, testExtraStamps):
self.assertMaskedImagesAlmostEqual(
donutStamp.stamp_im, cutOutStamp.stamp_im
)
for donutStamp, cutOutStamp in zip(taskOut.donutStampsIntra, testIntraStamps):
self.assertMaskedImagesAlmostEqual(
donutStamp.stamp_im, cutOutStamp.stamp_im
)
testCoeffsRaw = self.task.estimateZernikes(testExtraStamps, testIntraStamps)
testCoeffsAvg = self.task.combineZernikes.run(testCoeffsRaw)
np.testing.assert_array_equal(taskOut.outputZernikesRaw, testCoeffsRaw)
np.testing.assert_array_equal(
taskOut.outputZernikesAvg, testCoeffsAvg.combinedZernikes
)
def testPipelineOnePairOnly(self):
pipeCmd = writePipetaskCmd(
self.repoDir,
self.testRunName,
self.instrument,
self.collections,
pipelineYaml=self.pipelineYaml,
)
pipeCmd += f" -d 'exposure IN ({self.visitNum}) and detector IN (191, 192)'"
runProgram(pipeCmd)
# Get Butler with updated registry
self.butler = dafButler.Butler(self.repoDir)
donutExtra = self.butler.get(
"donutStampsExtra", dataId=self.dataIdExtra, collections=[self.testRunName]
)
donutIntra = self.butler.get(
"donutStampsIntra", dataId=self.dataIdIntra, collections=[self.testRunName]
)
zernAvg = self.butler.get(
"zernikeEstimateAvg",
dataId=self.dataIdExtra,
collections=[self.testRunName],
)
zernRaw = self.butler.get(
"zernikeEstimateRaw",
dataId=self.dataIdExtra,
collections=[self.testRunName],
)
self.assertEqual(len(donutExtra), 2)
self.assertEqual(len(donutExtra), len(donutIntra))
self.assertEqual(np.shape(zernAvg), (19,))
self.assertEqual(np.shape(zernRaw), (2, 19))
self.badDataId = copy(self.dataIdExtra)
self.badDataId["detector"] = 195
with self.assertRaises(LookupError):
self.butler.get(
"donutStampsExtra",
dataId=self.badDataId,
collections=[self.testRunName],
)
|
lsst-ts/ts_wep
|
tests/task/test_estimateZernikesCwfsTask.py
|
Python
|
gpl-3.0
| 16,817
|
[
"VisIt"
] |
07df71685b986158cf23733fde79cb57b25f3de400aa73da687abbcb1d4f8411
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import time
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage, CoursewareSequentialTabPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.problem import ProblemPage
from ...pages.common.logout import LogoutPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class CoursewareTest(UniqueCourseTest):
"""
Test courseware.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(CoursewareTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 2')
)
)
).install()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _goto_problem_page(self):
"""
        Open the problem page and assert that the expected problem is shown.
"""
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 1')
def _change_problem_release_date_in_studio(self):
"""
"""
subsection = self.course_outline.section('Test Section 1').subsection('Test Subsection 1')
modal = subsection.edit()
self.course_outline.q(css="#start_date").fill("01/01/2030")
# Set the date again by clicking on datepicker to close it.
modal.release_date = '01/01/2030'
modal.save()
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def test_courseware(self):
"""
Test courseware if recent visited subsection become unpublished.
"""
# Visit problem page as a student.
self._goto_problem_page()
# Logout and login as a staff user.
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
# Visit course outline page in studio.
self.course_outline.visit()
# Set release date for subsection in future.
self._change_problem_release_date_in_studio()
# Wait for 2 seconds to save new date.
time.sleep(2)
# Logout and login as a student.
LogoutPage(self.browser).visit()
self._auto_auth(self.USERNAME, self.EMAIL, False)
# Visit courseware as a student.
self.courseware_page.visit()
# Problem name should be "TEST PROBLEM 2".
self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 2')
class CoursewareMultipleVerticalsTest(UniqueCourseTest):
"""
Test courseware with multiple verticals
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(CoursewareMultipleVerticalsTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
),
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
),
).install()
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
self.courseware_page.visit()
self.course_nav = CourseNavPage(self.browser)
def test_tab_position(self):
        # Test that the position in the URL directs to the correct tab in courseware.
self.course_nav.go_to_section('Test Section 1', 'Test Subsection 1')
subsection_url = self.courseware_page.get_active_subsection_url()
url_part_list = subsection_url.split('/')
self.assertEqual(len(url_part_list), 9)
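        # Sketch of the expected URL shape (host is hypothetical):
        #   https://<host>/courses/<course_id>/courseware/<chapter>/<subsection>/
        # which is why split('/') yields 9 parts with the course id at index 4.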
course_id = url_part_list[4]
chapter_id = url_part_list[-3]
subsection_id = url_part_list[-2]
problem1_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=1
).visit()
self.assertIn('problem 1 dummy body', problem1_page.get_selected_tab_content())
html1_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=2
).visit()
self.assertIn('html 1 dummy body', html1_page.get_selected_tab_content())
problem2_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=3
).visit()
self.assertIn('problem 2 dummy body', problem2_page.get_selected_tab_content())
html2_page = CoursewareSequentialTabPage(
self.browser,
course_id=course_id,
chapter=chapter_id,
subsection=subsection_id,
position=4
).visit()
self.assertIn('html 2 dummy body', html2_page.get_selected_tab_content())
|
martynovp/edx-platform
|
common/test/acceptance/tests/lms/test_lms_courseware.py
|
Python
|
agpl-3.0
| 7,379
|
[
"VisIt"
] |
b16c8a3a8fabd3076864cca3af4ca88e2a0e9a6e48c289aafa55a325f882e340
|
import numpy as np
class sample(object):
"""
class to sample hyperparams
- randomly
- from the previous hyperparams
"""
def __init__(self):
# new : [n_couches, c1, c2, c3, learning_rate, reg_l1, reg_l2, moment, decay, nesterov, activation]
# space definition
self.values = np.array([[0, 1, 2, 3], #n_couches
range(10, 500, 10), range(10, 500, 10), range(10, 500, 10), #couches
[0.001, 0.002, 0.004, 0.008, 0.016, 0.03, 0.06, 0.012, 0.025, 0.05, 0.1, 0.2, 0.4, 0.8], #learning rate
[0.000001,0.00001,0.0001,0.001,0.01,0.1], #reg_l1
[0.000001,0.00001,0.0001,0.001,0.01,0.1], #reg_l2
[0.001, 0.002, 0.004, 0.008, 0.016, 0.03, 0.06, 0.012, 0.025, 0.05, 0.1, 0.2, 0.4, 0.8], #moment
[.0,0.001, 0.002, 0.004, 0.008, 0.016, 0.03, 0.06, 0.012, 0.025, 0.05, 0.1, 0.2, 0.4, 0.8], #decay
[0,1], #nesterov
                                [0, 1, 2]], dtype=object)  #activation; object dtype since rows differ in length
self.max = np.zeros(self.values.shape[0], dtype='int')
for i in range(self.values.shape[0]):
self.max[i] = len(self.values[i])
# random sampling (initialisation)
self.c = []
for i in range(self.values.shape[0]):
self.c.append(np.random.randint(self.max[i]))
def get_MNIST(self):
"""
:return: a readable array of hyperparams for train_MNIST
"""
res = []
res.append(self.values[0][self.c[0]])
n = []
for i in range(self.c[0]):
            n.append(self.values[1 + i][self.c[1 + i]])
res.append(n)
for i in range(4, self.values.shape[0]):
res.append(self.values[i][self.c[i]])
return res
def gaussian_samp(self):
"""
        Gaussian sampling around the current sample;
        creates a new object (not in place)
:return: a sample
"""
s = sample()
rand = np.random.normal(np.zeros(self.values.shape[0]), 0.5)
for p in range(rand.shape[0]):
            s.c[p] = min(max(int(self.c[p] + 0.5 + rand[p]), 0), self.max[p] - 1)
return s
def get_RSM(self):
"""
get a readable array for RSM training
:return: np array
"""
        s = self.get_MNIST()
# new : [n_couches, noeuds, learning_rate, reg_l1, reg_l2, moment, decay, nesterov, activation]
# train : [n_couches, c1, c2, c3, learning_rate, reg_l1, reg_l2, moment, decay, nesterov, a1, a2, a3]
assert (len(s) == 9)
t = np.array([s[0] / 3., 0, 0, 0, s[2], s[3], s[4], s[5], s[6], s[7], 0, 0, 0])
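        # One-hot encode the activation choice below; layer sizes are scaled
        # by 500 so they fall in (0, 1).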
t[10 + s[8]] = 1
for n in range(len(s[1])):
t[1 + n] = s[1][n] / 500.
return t
def __str__(self):
return str(self.c)
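# Minimal usage sketch (hypothetical driver, not part of this module):
#
#   s = sample()                   # random initial hyperparameters
#   cand = s.gaussian_samp()       # local Gaussian move in index space
#   params = cand.get_MNIST()      # decode into readable hyperparams
#   vec = cand.get_RSM()           # flat vector for the response-surface model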
|
AntoinePrv/hyperNN
|
hyperLearn/sample.py
|
Python
|
mit
| 2,893
|
[
"Gaussian"
] |
4c1a7df7d07f7776b6ae9ddd31d6d0cd433b94e6ae84da084be745ae72f48e2e
|
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for bayespy.utils.linalg module.
"""
import numpy as np
from .. import misc
from .. import linalg
class TestDot(misc.TestCase):
def test_dot(self):
"""
Test dot product multiple multi-dimensional arrays.
"""
# If no arrays, return 0
self.assertAllClose(linalg.dot(),
0)
# If only one array, return itself
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]]),
[[1,2,3],
[4,5,6]])
# Basic test of two arrays: (2,3) * (3,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]]),
[[31,19],
[85,55]])
# Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]],
[[4],
[5]],
[[6,7]]),
[[1314,1533],
[3690,4305]])
# Test broadcasting: (2,2,2) * (2,2,2,2)
self.assertAllClose(linalg.dot([[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[9,1],
[2,3]],
[[4,5],
[6,7]]]]),
[[[[ 7, 10],
[ 15, 22]],
[[ 67, 78],
[ 91, 106]]],
[[[ 13, 7],
[ 35, 15]],
[[ 56, 67],
[ 76, 91]]]])
# Inconsistent shapes: (2,3) * (2,3)
self.assertRaises(ValueError,
linalg.dot,
[[1,2,3],
[4,5,6]],
[[1,2,3],
[4,5,6]])
# Other axes do not broadcast: (2,2,2) * (3,2,2)
self.assertRaises(ValueError,
linalg.dot,
[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[1,2],
[3,4]],
[[5,6],
[7,8]],
[[9,1],
[2,3]]])
# Do not broadcast matrix axes: (2,1) * (3,2)
self.assertRaises(ValueError,
linalg.dot,
[[1],
[2]],
[[1,2,3],
[4,5,6]])
# Do not accept less than 2-D arrays: (2) * (2,2)
self.assertRaises(ValueError,
linalg.dot,
[1,2],
[[1,2,3],
[4,5,6]])
class TestBandedSolve(misc.TestCase):
def test_block_banded_solve(self):
"""
Test the Gaussian elimination algorithm for block-banded matrices.
"""
#
# Create a block-banded matrix
#
# Number of blocks
N = 40
# Random sizes of the blocks
#D = np.random.randint(5, 10, size=N)
# Fixed sizes of the blocks
        D = 5 * np.ones(N, dtype=int)
# Some helpful variables to create the covariances
W = [np.random.randn(D[i], 2*D[i])
for i in range(N)]
# The diagonal blocks (covariances)
A = [np.dot(W[i], W[i].T) for i in range(N)]
# The superdiagonal blocks (cross-covariances)
B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)]
C = misc.block_banded(A, B)
# Create the system to be solved: y=C*x
x_true = np.random.randn(np.sum(D))
y = np.dot(C, x_true)
x_true = np.reshape(x_true, (N, -1))
y = np.reshape(y, (N, -1))
#
# Run tests
#
# The correct inverse
invC = np.linalg.inv(C)
# Inverse from the function that is tested
(invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A),
np.asarray(B),
np.asarray(y))
# Check that you get the correct number of blocks
self.assertEqual(len(invA), N)
self.assertEqual(len(invB), N-1)
# Check each block
i0 = 0
for i in range(N-1):
i1 = i0 + D[i]
i2 = i1 + D[i+1]
# Check diagonal block
self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1]))
# Check super-diagonal block
self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2]))
i0 = i1
# Check last block
self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:]))
# Check the solution of the system
self.assertTrue(np.allclose(x_true, x))
# Check the log determinant
self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
|
jluttine/bayespy
|
bayespy/utils/tests/test_linalg.py
|
Python
|
mit
| 6,200
|
[
"Gaussian"
] |
5e0a19723d5611ed97f25d94f865c3b207d11b1600c28cbeda310bf8daf63125
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProtgenerics(RPackage):
"""S4 generic functions needed by Bioconductor proteomics packages."""
homepage = "https://bioconductor.org/packages/ProtGenerics/"
url = "https://git.bioconductor.org/packages/ProtGenerics"
list_url = homepage
version('1.8.0', git='https://git.bioconductor.org/packages/ProtGenerics', commit='b2b3bb0938e20f58fca905f6870de7dbc9dfd7a3')
depends_on('r@3.4.0:3.4.9', when='@1.8.0')
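# Usage sketch with the standard Spack CLI:
#
#   spack install r-protgenerics@1.8.0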
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-protgenerics/package.py
|
Python
|
lgpl-2.1
| 1,700
|
[
"Bioconductor"
] |
c73c90b5b904c183aa8dd5d188463998847633dfde878ef28c92d4fb15a4591e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import json
from monty.json import MontyDecoder
import numpy as np
import matplotlib
matplotlib.use("pdf")
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.xas.spectrum import XANES
from pymatgen.vis.plotters import SpectrumPlotter
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files/spectrum_test")
with open(os.path.join(test_dir, 'Pd2O.json')) as fp:
spect_data_dict = json.load(fp, cls=MontyDecoder)
class SpectrumPlotterTest(PymatgenTest):
def setUp(self):
self.xanes = XANES.from_dict(spect_data_dict)
def test_get_plot(self):
self.plotter = SpectrumPlotter(yshift=0.2)
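        # yshift separates successive spectra vertically so the curves do
        # not overlap in the combined plot.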
self.plotter.add_spectrum("Pd2O", self.xanes)
xanes = self.xanes.copy()
xanes.y += np.random.randn(len(xanes.y)) * 0.005
self.plotter.add_spectrum("Pd2O + noise", xanes)
self.plotter.add_spectrum("Pd2O - replot", xanes, "k")
plt = self.plotter.get_plot()
self.plotter.save_plot("spectrum_plotter_test.eps")
os.remove("spectrum_plotter_test.eps")
def test_get_stacked_plot(self):
self.plotter = SpectrumPlotter(yshift=0.2, stack=True)
self.plotter.add_spectrum("Pd2O", self.xanes, "b")
xanes = self.xanes.copy()
xanes.y += np.random.randn(len(xanes.y)) * 0.005
self.plotter.add_spectrum("Pd2O + noise", xanes, "r")
plt = self.plotter.get_plot()
if __name__ == '__main__':
unittest.main()
|
matk86/pymatgen
|
pymatgen/vis/tests/test_plotters.py
|
Python
|
mit
| 1,629
|
[
"pymatgen"
] |
5d6c83dac7431eed474d7b79873503f19ae521206318d7e2e3a1c3443a4ff7d0
|
# coding: utf-8
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/research/software-development/boltztrap/
You need version 1.2.3 or higher
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
import logging
import math
import os
import subprocess
import tempfile
import time
import numpy as np
from monty.dev import requires
from monty.json import jsanitize, MSONable
from monty.os import cd
from monty.os.path import which
from scipy import constants
from scipy.spatial import distance
from pymatgen.core.lattice import Lattice
from pymatgen.core.units import Energy, Length
from pymatgen.electronic_structure.bandstructure import \
BandStructureSymmLine, Kpoint
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = "Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
class BoltztrapRunner(MSONable):
"""
This class is used to run Boltztrap on a band structure object.
"""
@requires(which('x_trans'),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at http://"
"www.icams.de/content/research/software-development/boltztrap/ "
"and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path")
def __init__(self, bs, nelec, dos_type="HISTO", energy_grid=0.005,
lpfac=10, run_type="BOLTZ", band_nb=None, tauref=0, tauexp=0,
tauen=0, soc=False, doping=None, energy_span_around_fermi=1.5,
scissor=0.0, kpt_line=None, spin=None, cond_band=False,
tmax=1300, tgrid=50, symprec=1e-3, cb_cut=10, timeout=7200):
"""
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedon method. TETRA
typically gives better results (especially for DOSes)
but takes more time
energy_grid:
the energy steps used for the integration (eV)
lpfac:
the number of interpolation points in the real space. By
                default 10 gives 10 times more points in the real space than
the number of kpoints given in reciprocal space
run_type:
type of boltztrap usage. by default
- BOLTZ: (default) compute transport coefficients
- BANDS: interpolate all bands contained in the energy range
specified in energy_span_around_fermi variable, along specified
k-points
- DOS: compute total and partial dos (custom BoltzTraP code
needed!)
                - FERMI: compute the Fermi surface or, more precisely,
                interpolate selected bands
band_nb:
indicates a band number. Used for Fermi Surface interpolation
(run_type="FERMI")
spin:
specific spin component (1: up, -1: down) of the band selected
in FERMI mode (mandatory).
cond_band:
if a conduction band is specified in FERMI mode,
set this variable as True
tauref:
reference relaxation time. Only set to a value different than
zero if we want to model beyond the constant relaxation time.
tauexp:
exponent for the energy in the non-constant relaxation time
approach
tauen:
reference energy for the non-constant relaxation time approach
soc:
results from spin-orbit coupling (soc) computations give
typically non-polarized (no spin up or down) results but single
electron occupations. If the band structure comes from a soc
computation, you should set soc to True (default False)
doping:
the fixed doping levels you want to compute. Boltztrap provides
both transport values depending on electron chemical potential
(fermi energy) and for a series of fixed carrier
                concentrations. By default, this covers 1e16 to 1e22 cm^-3,
                with points at 1, 2.5, 5 and 7.5 within each decade.
energy_span_around_fermi:
usually the interpolation is not needed on the entire energy
range but on a specific range around the fermi level.
This energy gives this range in eV. by default it is 1.5 eV.
If DOS or BANDS type are selected, this range is automatically
set to cover the entire energy range.
scissor:
scissor to apply to the band gap (eV). This applies a scissor
operation moving the band edges without changing the band
shape. This is useful to correct the often underestimated band
gap in DFT. Default is 0.0 (no scissor)
kpt_line:
list of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
symprec: 1e-3 is the default in pymatgen. If the kmesh has been
generated using a different symprec, it has to be specified
to avoid a "factorization error" in BoltzTraP calculation.
If a kmesh that spans the whole Brillouin zone has been used,
or to disable all the symmetries, set symprec to None.
cb_cut: by default 10% of the highest conduction bands are
removed because they are often not accurate.
Tune cb_cut to change the percentage (0-100) of bands
that are removed.
timeout: overall time limit (in seconds): mainly to avoid infinite
                loops when trying to find Fermi levels.
"""
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
self.cb_cut = cb_cut / 100.
if isinstance(doping, list) and len(doping) > 0:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:
self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
self._symprec = symprec
if self.run_type in ("DOS", "BANDS"):
self._auto_set_energy_range()
self.timeout = timeout
self.start_time = time.time()
def _auto_set_energy_range(self):
"""
        Automatically determine the energy range as the min/max eigenvalue
        minus/plus a 2 eV buffer.
"""
emins = [min([e_k[0] for e_k in self._bs.bands[Spin.up]])]
emaxs = [max([e_k[0] for e_k in self._bs.bands[Spin.up]])]
if self._bs.is_spin_polarized:
emins.append(min([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
emaxs.append(max([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
min_eigenval = Energy(min(emins) - self._bs.efermi, "eV"). \
to("Ry")
max_eigenval = Energy(max(emaxs) - self._bs.efermi, "eV"). \
to("Ry")
# set energy range to buffer around min/max EV
# buffer does not increase CPU time but will help get equal
# energies for spin up/down for band structure
const = Energy(2, "eV").to("Ry")
self._ll = min_eigenval - const
self._hl = max_eigenval + const
en_range = Energy(max((abs(self._ll), abs(self._hl))),
"Ry").to("eV")
self.energy_span_around_fermi = en_range * 1.01
print("energy_span_around_fermi = ",
self.energy_span_around_fermi)
@property
def bs(self):
"""
:return: The BandStructure
"""
return self._bs
@property
def nelec(self):
"""
:return: Number of electrons
"""
return self._nelec
def write_energy(self, output_file):
"""
Writes the energy to an output file.
:param output_file: Filename
"""
with open(output_file, 'w') as f:
f.write("test\n")
f.write("{}\n".format(len(self._bs.kpoints)))
if self.run_type == "FERMI":
sign = -1.0 if self.cond_band else 1.0
for i in range(len(self._bs.kpoints)):
eigs = []
eigs.append(Energy(
self._bs.bands[Spin(self.spin)][self.band_nb][i] -
self._bs.efermi, "eV").to("Ry"))
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2],
len(eigs)))
for j in range(len(eigs)):
f.write("%18.8f\n" % (sign * float(eigs[j])))
else:
for i, kpt in enumerate(self._bs.kpoints):
eigs = []
if self.run_type == "DOS":
spin_lst = [self.spin]
else:
spin_lst = self._bs.bands
for spin in spin_lst:
# use 90% of bottom bands since highest eigenvalues
# are usually incorrect
# ask Geoffroy Hautier for more details
nb_bands = int(math.floor(self._bs.nb_bands * (1 - self.cb_cut)))
for j in range(nb_bands):
eigs.append(
Energy(self._bs.bands[Spin(spin)][j][i] -
self._bs.efermi, "eV").to("Ry"))
eigs.sort()
if self.run_type == "DOS" and self._bs.is_spin_polarized:
eigs.insert(0, self._ll)
eigs.append(self._hl)
f.write("%12.8f %12.8f %12.8f %d\n"
% (kpt.frac_coords[0],
kpt.frac_coords[1],
kpt.frac_coords[2],
len(eigs)))
for j in range(len(eigs)):
f.write("%18.8f\n" % (float(eigs[j])))
def write_struct(self, output_file):
"""
Writes the structure to an output file.
:param output_file: Filename
"""
if self._symprec is not None:
sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)
elif self._symprec is None:
pass
with open(output_file, 'w') as f:
if self._symprec is not None:
f.write("{} {}\n".format(self._bs.structure.composition.formula,
sym.get_space_group_symbol()))
elif self._symprec is None:
f.write("{} {}\n".format(self._bs.structure.composition.formula,
"symmetries disabled"))
f.write("{}\n".format("\n".join(
[" ".join(["%.5f" % Length(i, "ang").to("bohr") for i in row])
for row in self._bs.structure.lattice.matrix])))
if self._symprec is not None:
ops = sym.get_symmetry_dataset()['rotations']
elif self._symprec is None:
ops = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
f.write("{}\n".format(len(ops)))
for c in ops:
for row in c:
f.write("{}\n".format(" ".join(str(i) for i in row)))
def write_def(self, output_file):
"""
Writes the def to an output file.
:param output_file: Filename
"""
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
with open(output_file, 'w') as f:
so = ""
if self._bs.is_spin_polarized or self.soc:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
"6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n" +
"20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy" + so + "', 'old', "
"'formatted',0\n" +
"48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" +
"49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" +
"50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" +
"51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" +
"21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" +
"22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" +
"24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" +
"30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n")
def write_proj(self, output_file_proj, output_file_def):
"""
Writes the projections to an output file.
        :param output_file_proj: Output filename prefix for the projections
        :param output_file_def: Filename of the def file
"""
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
for oi, o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
with open(output_file_proj + "_" + str(site_nb) + "_" + str(
o),
'w') as f:
f.write(self._bs.structure.composition.formula + "\n")
f.write(str(len(self._bs.kpoints)) + "\n")
for i in range(len(self._bs.kpoints)):
tmp_proj = []
for j in range(
int(math.floor(self._bs.nb_bands * (1 - self.cb_cut)))):
tmp_proj.append(
self._bs.projections[Spin(self.spin)][j][
i][oi][site_nb])
# TODO deal with the sorting going on at
# the energy level!!!
# tmp_proj.sort()
if self.run_type == "DOS" and \
self._bs.is_spin_polarized:
tmp_proj.insert(0, self._ll)
tmp_proj.append(self._hl)
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2],
len(tmp_proj)))
for j in range(len(tmp_proj)):
f.write("%18.8f\n" % float(tmp_proj[j]))
with open(output_file_def, 'w') as f:
so = ""
if self._bs.is_spin_polarized:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
"6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n" +
"20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy" + so + "', 'old', "
"'formatted',0\n" +
"48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" +
"49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" +
"50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" +
"51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" +
"21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" +
"22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" +
"24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" +
"30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n")
i = 1000
for oi, o in enumerate(Orbital):
for site_nb in range(0, len(self._bs.structure.sites)):
if oi < len(self._bs.projections[Spin.up][0][0]):
f.write(str(i) + ",\'" + "boltztrap.proj_" + str(
site_nb) + "_" + str(o.name) +
"\' \'old\', \'formatted\',0\n")
i += 1
def write_intrans(self, output_file):
"""
Writes the intrans to an output file.
:param output_file: Filename
"""
setgap = 1 if self.scissor > 0.0001 else 0
if self.run_type == "BOLTZ" or self.run_type == "DOS":
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n"
% (setgap, Energy(self.scissor, "eV").to("Ry")))
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"%s # run mode (only BOLTZ is "
"supported)\n" % self.run_type)
fout.write(
".15 # (efcut) energy range of "
"chemical potential\n")
fout.write(
"{} {} # Tmax, temperature grid\n".
format(self.tmax, self.tgrid))
fout.write(
"-1. # energyrange of bands given DOS output sig_xxx and "
"dos_xxx (xxx is band number)\n")
fout.write(self.dos_type + "\n") # e.g., HISTO or TETRA
fout.write("{} {} {} 0 0 0\n".format(
self.tauref, self.tauexp, self.tauen))
fout.write("{}\n".format(2 * len(self.doping)))
for d in self.doping:
fout.write(str(d) + "\n")
for d in self.doping:
fout.write(str(-d) + "\n")
elif self.run_type == "FERMI":
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 0 0.0 # iskip (not presently used) idebug "
"setgap shiftgap \n")
fout.write(
"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,"
"energy span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"), self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"FERMI # run mode (only BOLTZ is "
"supported)\n")
fout.write(str(1) +
" # actual band selected: " +
str(self.band_nb + 1) + " spin: " + str(self.spin))
elif self.run_type == "BANDS":
if self.kpt_line is None:
kpath = HighSymmKpath(self._bs.structure)
self.kpt_line = [Kpoint(k, self._bs.structure.lattice) for k
in
kpath.get_kpoints(coords_are_cartesian=False)[
0]]
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
elif type(self.kpt_line[0]) == Kpoint:
self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
with open(output_file, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write(
"1 0 %d %f # iskip (not presently used) idebug "
"setgap shiftgap \n"
% (setgap, Energy(self.scissor, "eV").to("Ry")))
fout.write(
"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
"span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"),
Energy(self.energy_span_around_fermi, "eV").to("Ry"),
self._nelec))
fout.write(
"CALC # CALC (calculate expansion "
"coeff), NOCALC read from file\n")
fout.write(
"%d # lpfac, number of latt-points "
"per k-point\n" % self.lpfac)
fout.write(
"BANDS # run mode (only BOLTZ is "
"supported)\n")
fout.write("P " + str(len(self.kpt_line)) + "\n")
for kp in self.kpt_line:
fout.writelines([str(k) + " " for k in kp])
fout.write('\n')
def write_input(self, output_dir):
"""
Writes the input files.
:param output_dir: Directory to write the input files.
"""
if self._bs.is_spin_polarized or self.soc:
self.write_energy(os.path.join(output_dir, "boltztrap.energyso"))
else:
self.write_energy(os.path.join(output_dir, "boltztrap.energy"))
self.write_struct(os.path.join(output_dir, "boltztrap.struct"))
self.write_intrans(os.path.join(output_dir, "boltztrap.intrans"))
self.write_def(os.path.join(output_dir, "BoltzTraP.def"))
if len(self.bs.projections) != 0 and self.run_type == "DOS":
self.write_proj(os.path.join(output_dir, "boltztrap.proj"),
os.path.join(output_dir, "BoltzTraP.def"))
def run(self, path_dir=None, convergence=True, write_input=True,
clear_dir=False, max_lpfac=150, min_egrid=0.00005):
"""
Write inputs (optional), run BoltzTraP, and ensure
convergence (optional)
Args:
path_dir (str): directory in which to run BoltzTraP
convergence (bool): whether to check convergence and make
corrections if needed
write_input: (bool) whether to write input files before the run
(required for convergence mode)
clear_dir: (bool) whether to remove all files in the path_dir
before starting
max_lpfac: (float) maximum lpfac value to try before reducing egrid
in convergence mode
min_egrid: (float) minimum egrid value to try before giving up in
convergence mode
Returns:
"""
# TODO: consider making this a part of custodian rather than pymatgen
# A lot of this functionality (scratch dirs, handlers, monitors)
# is built into custodian framework
if convergence and not write_input:
raise ValueError("Convergence mode requires write_input to be "
"true")
if self.run_type in ("BANDS", "DOS", "FERMI"):
convergence = False
if self.lpfac > max_lpfac:
max_lpfac = self.lpfac
if self.run_type == "BANDS" and self.bs.is_spin_polarized:
print("Reminder: for run_type " + str(
self.run_type) + ", spin component are not separated! "
"(you have a spin polarized band structure)")
if self.run_type in ("FERMI", "DOS") and self.spin is None:
if self.bs.is_spin_polarized:
raise BoltztrapError(
"Spin parameter must be specified for spin polarized "
"band structures!")
else:
self.spin = 1
dir_bz_name = "boltztrap"
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.abspath(
os.path.join(path_dir, dir_bz_name))
if not os.path.exists(path_dir):
os.mkdir(path_dir)
elif clear_dir:
for c in os.listdir(path_dir):
os.remove(os.path.join(path_dir, c))
FORMAT = "%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT,
filename=os.path.join(path_dir, "../boltztrap.out"))
with cd(path_dir):
lpfac_start = self.lpfac
converged = False
while self.energy_grid >= min_egrid and not converged:
self.lpfac = lpfac_start
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after timeout "
"of {} s".format(self.timeout))
logging.info("lpfac, energy_grid: {} {}".format(self.lpfac, self.energy_grid))
while self.lpfac <= max_lpfac and not converged:
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after "
"timeout of {} s".format(self.timeout))
if write_input:
self.write_input(path_dir)
bt_exe = ["x_trans", "BoltzTraP"]
if self._bs.is_spin_polarized or self.soc:
bt_exe.append("-so")
p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
for c in p.communicate():
logging.info(c.decode())
if "error in factorization" in c.decode():
raise BoltztrapError("error in factorization")
warning = ""
with open(os.path.join(path_dir,
dir_bz_name + ".outputtrans")) as f:
for l in f:
if "Option unknown" in l:
raise BoltztrapError(
"DOS mode needs a custom version of "
"BoltzTraP code is needed")
if "WARNING" in l:
warning = l
break
if "Error - Fermi level was not found" in l:
warning = l
break
if not warning and convergence:
# check convergence for warning
analyzer = BoltztrapAnalyzer.from_files(path_dir)
for doping in ['n', 'p']:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(
analyzer.doping[doping]):
warning = "length of mu_doping array is " \
"incorrect"
break
if doping == 'p' and \
sorted(
analyzer.mu_doping[doping][
c], reverse=True) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for p-type"
break
# ensure n-type doping sorted correctly
if doping == 'n' and sorted(
analyzer.mu_doping[doping][c]) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for n-type"
break
if warning:
self.lpfac += 10
                        logging.warning("Warning detected: {}! Increasing lpfac "
                                        "to {}".format(warning, self.lpfac))
else:
converged = True
if not converged:
self.energy_grid /= 10
logging.info("Could not converge with max lpfac; "
"Decrease egrid to {}".format(self.energy_grid))
if not converged:
raise BoltztrapError(
"Doping convergence not reached with lpfac=" + str(
self.lpfac) + ", energy_grid=" + str(self.energy_grid))
return path_dir
def as_dict(self):
"""
:return: MSONable dict
"""
results = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lpfac": self.lpfac,
"bs": self.bs.as_dict(),
"nelec": self._nelec,
"dos_type": self.dos_type,
"run_type": self.run_type,
"band_nb": self.band_nb,
"spin": self.spin,
"cond_band": self.cond_band,
"tauref": self.tauref,
"tauexp": self.tauexp,
"tauen": self.tauen,
"soc": self.soc,
"kpt_line": self.kpt_line,
"doping": self.doping,
"energy_span_around_fermi": self.energy_span_around_fermi,
"scissor": self.scissor,
"tmax": self.tmax,
"tgrid": self.tgrid,
"symprec": self._symprec
}
return jsanitize(results)
class BoltztrapError(Exception):
"""
Exception class for boltztrap.
Raised when the boltztrap gives an error
"""
pass
class BoltztrapAnalyzer:
"""
Class used to store all the data from a boltztrap run
"""
def __init__(self, gap=None, mu_steps=None, cond=None, seebeck=None,
kappa=None, hall=None, doping=None,
mu_doping=None, seebeck_doping=None, cond_doping=None,
kappa_doping=None,
hall_doping=None, intrans=None, dos=None, dos_partial=None,
carrier_conc=None, vol=None, warning=None,
bz_bands=None, bz_kpoints=None, fermi_surface_data=None):
"""
Constructor taking directly all the data generated by Boltztrap. You
won't probably use it directly but instead use the from_files and
from_dict methods.
Args:
gap: The gap after interpolation in eV
mu_steps: The steps of electron chemical potential (or Fermi
level) in eV.
cond: The electronic conductivity tensor divided by a constant
relaxation time (sigma/tau) at different temperature and
fermi levels.
The format is {temperature: [array of 3x3 tensors at each
fermi level in mu_steps]}. The units are 1/(Ohm*m*s).
seebeck: The Seebeck tensor at different temperatures and fermi
levels. The format is {temperature: [array of 3x3 tensors at
each fermi level in mu_steps]}. The units are V/K
kappa: The electronic thermal conductivity tensor divided by a
constant relaxation time (kappa/tau) at different temperature
and fermi levels. The format is {temperature: [array of 3x3
tensors at each fermi level in mu_steps]}
The units are W/(m*K*s)
hall: The hall tensor at different temperature and fermi levels
The format is {temperature: [array of 27 coefficients list at
each fermi level in mu_steps]}
The units are m^3/C
doping: The different doping levels that have been given to
Boltztrap. The format is {'p':[],'n':[]} with an array of
doping levels. The units are cm^-3
mu_doping: Gives the electron chemical potential (or Fermi level)
for a given set of doping.
Format is {'p':{temperature: [fermi levels],'n':{temperature:
[fermi levels]}}
the fermi level array is ordered according to the doping
levels in doping units for doping are in cm^-3 and for Fermi
level in eV
seebeck_doping: The Seebeck tensor at different temperatures and
doping levels. The format is {'p': {temperature: [Seebeck
tensors]}, 'n':{temperature: [Seebeck tensors]}}
The [Seebeck tensors] array is ordered according to the
doping levels in doping units for doping are in cm^-3 and for
Seebeck in V/K
cond_doping: The electronic conductivity tensor divided by a
constant relaxation time (sigma/tau) at different
temperatures and doping levels
The format is {'p':{temperature: [conductivity tensors]},
'n':{temperature: [conductivity tensors]}}
The [conductivity tensors] array is ordered according to the
doping levels in doping units for doping are in cm^-3 and for
conductivity in 1/(Ohm*m*s)
kappa_doping: The thermal conductivity tensor divided by a constant
relaxation time (kappa/tau) at different temperatures and
doping levels.
The format is {'p':{temperature: [thermal conductivity
tensors]},'n':{temperature: [thermal conductivity tensors]}}
The [thermal conductivity tensors] array is ordered according
to the doping levels in doping units for doping are in cm^-3
and for thermal conductivity in W/(m*K*s)
hall_doping: The Hall tensor at different temperatures and doping
levels.
The format is {'p':{temperature: [Hall tensors]},
'n':{temperature: [Hall tensors]}}
The [Hall tensors] array is ordered according to the doping
levels in doping and each Hall tensor is represented by a 27
coefficients list.
The units are m^3/C
intrans: a dictionary of inputs e.g. {"scissor": 0.0}
carrier_conc: The concentration of carriers in electron (or hole)
per unit cell
dos: The dos computed by Boltztrap given as a pymatgen Dos object
dos_partial: Data for the partial DOS projected on sites and
orbitals
vol: Volume of the unit cell in angstrom cube (A^3)
warning: string if BoltzTraP outputted a warning, else None
bz_bands: Data for interpolated bands on a k-point line
(run_type=BANDS)
bz_kpoints: k-point in reciprocal coordinates for interpolated
bands (run_type=BANDS)
fermi_surface_data: energy values in a 3D grid imported from the
output .cube file.
"""
self.gap = gap
self.mu_steps = mu_steps
self._cond = cond
self._seebeck = seebeck
self._kappa = kappa
self._hall = hall
self.warning = warning
self.doping = doping
self.mu_doping = mu_doping
self._seebeck_doping = seebeck_doping
self._cond_doping = cond_doping
self._kappa_doping = kappa_doping
self._hall_doping = hall_doping
self.intrans = intrans
self._carrier_conc = carrier_conc
self.dos = dos
self.vol = vol
self._dos_partial = dos_partial
self._bz_bands = bz_bands
self._bz_kpoints = bz_kpoints
self.fermi_surface_data = fermi_surface_data
def get_symm_bands(self, structure, efermi, kpt_line=None,
labels_dict=None):
"""
        Useful to read bands from BoltzTraP output and get a
        BandStructureSymmLine object comparable with the one from a DFT
        calculation (if the same kpt_line is provided). By default, kpt_line
        and labels_dict follow the standard path of high-symmetry k-points
        for the specified structure; they can also be extracted from the
        BandStructureSymmLine object you want to compare with. The efermi
        value must be specified to create the BandStructureSymmLine
        object (usually it comes from a DFT or BoltzTraP calculation).
        """
try:
if kpt_line is None:
kpath = HighSymmKpath(structure)
kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for
k in
kpath.get_kpoints(coords_are_cartesian=False)[0]]
labels_dict = {l: k for k, l in zip(
*kpath.get_kpoints(coords_are_cartesian=False)) if l}
kpt_line = [kp.frac_coords for kp in kpt_line]
elif type(kpt_line[0]) == Kpoint:
kpt_line = [kp.frac_coords for kp in kpt_line]
labels_dict = {k: labels_dict[k].frac_coords for k in
labels_dict}
idx_list = []
# kpt_dense=np.array([kp for kp in self._bz_kpoints])
for i, kp in enumerate(kpt_line):
w = []
prec = 1e-05
while len(w) == 0:
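                    # Widen the matching tolerance tenfold per pass until the
                    # requested k-point is found among the BoltzTraP k-points.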
w = np.where(np.all(
np.abs(kp - self._bz_kpoints) < [prec] * 3,
axis=1))[0]
prec *= 10
# print( prec )
idx_list.append([i, w[0]])
# if len(w)>0:
# idx_list.append([i,w[0]])
# else:
# w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)
# <[1e-04,1e-04,1e-04],axis=1))[0]
# idx_list.append([i,w[0]])
idx_list = np.array(idx_list)
# print( idx_list.shape )
bands_dict = {Spin.up: (self._bz_bands * Energy(1, "Ry").to(
"eV") + efermi).T[:, idx_list[:, 1]].tolist()}
# bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()
sbs = BandStructureSymmLine(kpt_line, bands_dict,
structure.lattice.reciprocal_lattice,
efermi,
labels_dict=labels_dict)
return sbs
except Exception:
raise BoltztrapError(
"Bands are not in output of BoltzTraP.\nBolztrapRunner must "
"be run with run_type=BANDS")
@staticmethod
def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03, 0.03)):
"""
Compare sbs_bz BandStructureSymmLine calculated with boltztrap with
the sbs_ref BandStructureSymmLine as reference (from MP for
instance), computing correlation and energy difference for eight bands
around the gap (semiconductors) or fermi level (metals).
        warn_thr is a threshold for warning about the accuracy of the
        BoltzTraP-interpolated bands.
Return a dictionary with these keys:
- "N": the index of the band compared; inside each there are:
- "Corr": correlation coefficient for the 8 compared bands
- "Dist": energy distance for the 8 compared bands
- "branch_name": energy distance for that branch
- "avg_corr": average of correlation coefficient over the 8 bands
- "avg_dist": average of energy distance over the 8 bands
- "nb_list": list of indexes of the 8 compared bands
- "acc_thr": list of two float corresponing to the two warning
thresholds in input
- "acc_err": list of two bools:
True if the avg_corr > warn_thr[0], and
True if the avg_dist > warn_thr[1]
See also compare_sym_bands function doc
"""
if not sbs_ref.is_metal() and not sbs_bz.is_metal():
vbm_idx = sbs_bz.get_vbm()['band_index'][Spin.up][-1]
cbm_idx = sbs_bz.get_cbm()['band_index'][Spin.up][0]
nb_list = range(vbm_idx - 3, cbm_idx + 4)
else:
bnd_around_efermi = []
delta = 0
spin = list(sbs_bz.bands.keys())[0]
while len(bnd_around_efermi) < 8 and delta < 100:
delta += 0.1
bnd_around_efermi = []
for nb in range(len(sbs_bz.bands[spin])):
for kp in range(len(sbs_bz.bands[spin][nb])):
if abs(sbs_bz.bands[spin][nb][
kp] - sbs_bz.efermi) < delta:
bnd_around_efermi.append(nb)
break
if len(bnd_around_efermi) < 8:
print("Warning! check performed on " + str(
len(bnd_around_efermi)))
nb_list = bnd_around_efermi
else:
nb_list = bnd_around_efermi[:8]
# print(nb_list)
bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)
# print(bcheck)
        acc_err = [False, False]
        avg_corr = sum(item[1]['Corr'] for item in bcheck.items()) / 8
        avg_distance = sum(item[1]['Dist'] for item in bcheck.items()) / 8
        if avg_corr > warn_thr[0]:
            acc_err[0] = True
        if avg_distance > warn_thr[1]:
            acc_err[1] = True
bcheck['avg_corr'] = avg_corr
bcheck['avg_distance'] = avg_distance
bcheck['acc_err'] = acc_err
bcheck['acc_thr'] = warn_thr
bcheck['nb_list'] = nb_list
if True in acc_err:
print("Warning! some bands around gap are not accurate")
return bcheck
def get_seebeck(self, output='eigs', doping_levels=True):
"""
Gives the seebeck coefficient (microV/K) in either a
full 3x3 tensor form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to Seebeck at p-type doping
and 'n' to the Seebeck at n-type doping. Otherwise, returns a
{temp:[]} dictionary
The result contains either the sorted three eigenvalues of
the symmetric
Seebeck tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
units are microV/K
"""
return BoltztrapAnalyzer._format_to_output(self._seebeck,
self._seebeck_doping,
output,
doping_levels, 1e6)
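    # Illustrative usage (editor's sketch; `an` is a hypothetical analyzer
    # built with BoltztrapAnalyzer.from_files, 300 among the parsed
    # temperatures):
    #   s_eigs = an.get_seebeck(output='eigs', doping_levels=True)
    #   s_eigs['p'][300][0]  # three Seebeck eigenvalues (microV/K) at the
    #                        # first p-type doping level in self.doping['p']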
def get_conductivity(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor
form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to conductivity
at p-type doping and 'n' to the conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either
the sorted three eigenvalues of the symmetric
            conductivity tensor (output='eigs') or a full tensor (3x3
array) (output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are 1/Ohm*m
"""
return BoltztrapAnalyzer._format_to_output(self._cond,
self._cond_doping, output,
doping_levels,
relaxation_time)
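    # Editor's note: BoltzTraP works in the constant relaxation-time
    # approximation and outputs sigma/tau, so the values returned here scale
    # linearly with relaxation_time, e.g.:
    #   an.get_conductivity(relaxation_time=2e-14)  # ~2x the 1e-14 values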
def get_power_factor(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the power factor (Seebeck^2 * conductivity) in units
microW/(m*K^2) in either a full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
            If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
            'p' links to power factor
            at p-type doping and 'n' to the power factor at n-type doping.
Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            power factor tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are microW/(m K^2)
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
full_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][
t][i],
self._seebeck_doping[doping][
t][i]))
result_doping[doping][t].append(full_tensor)
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
full_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
result[t].append(full_tensor)
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels,
multi=1e6 * relaxation_time)
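    # Scalar sanity check (editor's sketch with made-up isotropic values):
    #   S = 200e-6 V/K and sigma = 1e5 1/(Ohm m) give
    #   PF = S**2 * sigma = 4e-3 W/(m K^2), reported as 4000 microW/(m K^2)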
def get_thermal_conductivity(self, output='eigs', doping_levels=True,
k_el=True, relaxation_time=1e-14):
"""
Gives the electronic part of the thermal conductivity in either a
full 3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
k_el (boolean): True for k_0-PF*T, False for k_0
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to thermal conductivity
at p-type doping and 'n' to the thermal conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            conductivity tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are W/mK
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
if k_el:
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][
t][i],
self._seebeck_doping[doping][
t][i]))
result_doping[doping][t].append((
self._kappa_doping[doping][t][
i] - pf_tensor * t))
else:
result_doping[doping][t].append((
self._kappa_doping[doping][t][i]))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
if k_el:
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
result[t].append((self._kappa[t][i] - pf_tensor * t))
else:
result[t].append((self._kappa[t][i]))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels,
multi=relaxation_time)
def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
            kl (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
            ZT tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][
i],
self._seebeck_doping[doping][t][
i]))
thermal_conduct = (self._kappa_doping[doping][t][i]
- pf_tensor * t) * relaxation_time
result_doping[doping][t].append(
np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl * np.eye(3, 3))))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
thermal_conduct = (self._kappa[t][i]
- pf_tensor * t) * relaxation_time
result[t].append(np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl *
np.eye(3, 3))))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels)
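    # Scalar sanity check (editor's sketch with made-up isotropic values):
    #   S = 200e-6 V/K, sigma = 1e5 1/(Ohm m), kappa_e + kl = 2 W/(m K),
    #   T = 600 K  ->  ZT = S**2 * sigma * T / 2 = 4e-3 * 600 / 2 = 1.2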
def get_average_eff_mass(self, output='eigs', doping_levels=True):
"""
Gives the average effective mass tensor. We call it average because
it takes into account all the bands
        and regions in the Brillouin zone. This is different from the standard
        textbook effective mass, which often relates
        to only one (parabolic) band.
The average effective mass tensor is defined as the integrated
average of the second derivative of E(k)
This effective mass tensor takes into account:
-non-parabolicity
-multiple extrema
-multiple bands
For more information about it. See:
Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze,
X. (2014).
How Does Chemistry Influence Electron Effective Mass in Oxides?
A High-Throughput Computational Analysis. Chemistry of Materials,
26(19), 5447–5458. doi:10.1021/cm404079a
or
Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze,
X. (2013).
Identification and design principles of low hole effective mass
p-type transparent conducting oxides.
Nature Communications, 4, 2292. doi:10.1038/ncomms3292
Depending on the value of output, we have either the full 3x3
effective mass tensor,
its 3 eigenvalues or an average
Args:
output (string): 'eigs' for eigenvalues, 'tensor' for the full
tensor and 'average' for an average (trace/3)
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
If doping_levels=True,a dictionary {'p':{temp:[]},'n':{temp:[]}}
with an array of effective mass tensor, eigenvalues of average
value (depending on output) for each temperature and for each
doping level.
The 'p' links to hole effective mass tensor and 'n' to electron
effective mass tensor.
"""
result = None
result_doping = None
conc = self.get_carrier_concentration()
if doping_levels:
result_doping = {doping: {t: [] for t in self._cond_doping[doping]}
for
doping in self.doping}
for doping in result_doping:
for temp in result_doping[doping]:
for i in range(len(self.doping[doping])):
try:
result_doping[doping][temp].append(np.linalg.inv(
np.array(self._cond_doping[doping][temp][i])) * self.doping[doping][i] * 10 ** 6 *
constants.e ** 2 / constants.m_e)
except np.linalg.LinAlgError:
pass
else:
result = {t: [] for t in self._seebeck}
for temp in result:
for i in range(len(self.mu_steps)):
                    try:
                        cond_inv = np.linalg.inv(
                            np.array(self._cond[temp][i]))
                        result[temp].append(
                            cond_inv * conc[temp][i] * 10 ** 6
                            * constants.e ** 2 / constants.m_e)
                    except np.linalg.LinAlgError:
                        # skip singular tensors instead of appending a stale
                        # or undefined inverse from a previous iteration
                        pass
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels)
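    # Editor's note: the expression above evaluates m* = n e^2 / (sigma/tau)
    # in units of the electron mass, where sigma/tau is the raw tensor parsed
    # from boltztrap.condtens; the 10 ** 6 factor converts the concentration
    # from cm^-3 to m^-3.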
def get_seebeck_eff_mass(self, output='average', temp=300, doping_levels=False,
Lambda=0.5):
"""
Seebeck effective mass calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the seebeck effective mass calculated using
the average of the three diagonal components of the seebeck tensor.
                'tensor' returns the seebeck effective mass with respect to the three
diagonal components of the seebeck tensor.
doping_levels: False means that the seebeck effective mass is calculated
for every value of the chemical potential
True means that the seebeck effective mass is calculated
for every value of the doping levels for both n and p types
temp: temperature of calculated seebeck.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a list of values for the seebeck effective mass w.r.t the chemical potential,
if doping_levels is set at False;
            a dict with n and p keys that contain a list of values for the seebeck effective
mass w.r.t the doping levels, if doping_levels is set at True;
if 'tensor' is selected, each element of the lists is a list containing
the three components of the seebeck effective mass.
"""
if doping_levels:
sbk_mass = {}
for dt in ('n', 'p'):
conc = self.doping[dt]
seebeck = self.get_seebeck(output=output, doping_levels=True)[dt][temp]
sbk_mass[dt] = []
for i in range(len(conc)):
if output == 'average':
sbk_mass[dt].append(
seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i]),
conc[i], temp, Lambda))
elif output == 'tensor':
sbk_mass[dt].append([])
for j in range(3):
sbk_mass[dt][-1].append(
seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i][j][j]),
conc[i], temp, Lambda))
else:
seebeck = self.get_seebeck(output=output, doping_levels=False)[temp]
conc = self.get_carrier_concentration()[temp]
sbk_mass = []
for i in range(len(conc)):
if output == 'average':
sbk_mass.append(
seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i]),
conc[i], temp, Lambda))
elif output == 'tensor':
sbk_mass.append([])
for j in range(3):
sbk_mass[-1].append(
seebeck_eff_mass_from_seebeck_carr(abs(seebeck[i][j][j]),
conc[i], temp, Lambda))
return sbk_mass
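    # Illustrative call (editor's sketch; assumes a BOLTZ run parsed into
    # `an` and 300 among the calculated temperatures):
    #   m_s = an.get_seebeck_eff_mass(output='average', temp=300,
    #                                 doping_levels=True, Lambda=0.5)
    #   m_s['n'][0]  # Seebeck effective mass (m0) at the first n-type level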
def get_complexity_factor(self, output='average', temp=300, doping_levels=False,
Lambda=0.5):
"""
        Fermi surface complexity factor calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the complexity factor calculated using the average
of the three diagonal components of the seebeck and conductivity tensors.
                'tensor' returns the complexity factor with respect to the three
diagonal components of seebeck and conductivity tensors.
doping_levels: False means that the complexity factor is calculated
for every value of the chemical potential
True means that the complexity factor is calculated
for every value of the doping levels for both n and p types
temp: temperature of calculated seebeck and conductivity.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a list of values for the complexity factor w.r.t the chemical potential,
if doping_levels is set at False;
            a dict with n and p keys that contain a list of values for the complexity factor
w.r.t the doping levels, if doping_levels is set at True;
if 'tensor' is selected, each element of the lists is a list containing
the three components of the complexity factor.
"""
if doping_levels:
cmplx_fact = {}
for dt in ('n', 'p'):
sbk_mass = self.get_seebeck_eff_mass(output, temp, True, Lambda)[dt]
cond_mass = self.get_average_eff_mass(output=output, doping_levels=True)[dt][temp]
if output == 'average':
cmplx_fact[dt] = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass)]
elif output == 'tensor':
cmplx_fact[dt] = []
for i in range(len(sbk_mass)):
cmplx_fact[dt].append([])
for j in range(3):
cmplx_fact[dt][-1].append((sbk_mass[i][j] / abs(cond_mass[i][j][j])) ** 1.5)
else:
sbk_mass = self.get_seebeck_eff_mass(output, temp, False, Lambda)
cond_mass = self.get_average_eff_mass(output=output, doping_levels=False)[temp]
if output == 'average':
cmplx_fact = [(m_s / abs(m_c)) ** 1.5 for m_s, m_c in zip(sbk_mass, cond_mass)]
elif output == 'tensor':
cmplx_fact = []
for i in range(len(sbk_mass)):
cmplx_fact.append([])
for j in range(3):
cmplx_fact[-1].append((sbk_mass[i][j] / abs(cond_mass[i][j][j])) ** 1.5)
return cmplx_fact
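    # Illustrative call (editor's sketch, same assumptions as above):
    #   nstar = an.get_complexity_factor(output='average', temp=300,
    #                                    doping_levels=True)
    #   # nstar['p'][i] == (m_S / m_c) ** 1.5 at the i-th p-type doping level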
def get_extreme(self, target_prop, maximize=True, min_temp=None,
max_temp=None, min_doping=None, max_doping=None,
isotropy_tolerance=0.05, use_average=True):
"""
This method takes in eigenvalues over a range of carriers,
temperatures, and doping levels, and tells you what is the "best"
value that can be achieved for the given target_property. Note that
this method searches the doping dict only, not the full mu dict.
Args:
target_prop: target property, i.e. "seebeck", "power factor",
"conductivity", "kappa", or "zt"
maximize: True to maximize, False to minimize (e.g. kappa)
min_temp: minimum temperature allowed
max_temp: maximum temperature allowed
min_doping: minimum doping allowed (e.g., 1E18)
max_doping: maximum doping allowed (e.g., 1E20)
isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
use_average: True for avg of eigenval, False for max eigenval
Returns:
A dictionary with keys {"p", "n", "best"} with sub-keys:
{"value", "temperature", "doping", "isotropic"}
"""
def is_isotropic(x, isotropy_tolerance):
"""
Internal method to tell you if 3-vector "x" is isotropic
Args:
x: the vector to determine isotropy for
isotropy_tolerance: tolerance, e.g. 0.05 is 5%
"""
if len(x) != 3:
raise ValueError("Invalid input to is_isotropic!")
st = sorted(x)
return bool(all([st[0], st[1], st[2]]) and
(abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and
                        (abs((st[2] - st[0]) / st[2]) <= isotropy_tolerance) and
(abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))
if target_prop.lower() == "seebeck":
d = self.get_seebeck(output="eigs", doping_levels=True)
elif target_prop.lower() == "power factor":
d = self.get_power_factor(output="eigs", doping_levels=True)
elif target_prop.lower() == "conductivity":
d = self.get_conductivity(output="eigs", doping_levels=True)
elif target_prop.lower() == "kappa":
d = self.get_thermal_conductivity(output="eigs",
doping_levels=True)
elif target_prop.lower() == "zt":
d = self.get_zt(output="eigs", doping_levels=True)
else:
raise ValueError("Target property: {} not recognized!".
format(target_prop))
absval = True # take the absolute value of properties
x_val = None
x_temp = None
x_doping = None
x_isotropic = None
output = {}
min_temp = min_temp or 0
max_temp = max_temp or float('inf')
min_doping = min_doping or 0
max_doping = max_doping or float('inf')
for pn in ('p', 'n'):
for t in d[pn]: # temperatures
if min_temp <= float(t) <= max_temp:
for didx, evs in enumerate(d[pn][t]):
doping_lvl = self.doping[pn][didx]
if min_doping <= doping_lvl <= max_doping:
isotropic = is_isotropic(evs, isotropy_tolerance)
if absval:
evs = [abs(x) for x in evs]
if use_average:
val = float(sum(evs)) / len(evs)
else:
val = max(evs)
if x_val is None or (val > x_val and maximize) \
or (val < x_val and not maximize):
x_val = val
x_temp = t
x_doping = doping_lvl
x_isotropic = isotropic
output[pn] = {'value': x_val, 'temperature': x_temp,
'doping': x_doping, 'isotropic': x_isotropic}
x_val = None
if maximize:
max_type = 'p' if output['p']['value'] >= \
output['n']['value'] else 'n'
else:
max_type = 'p' if output['p']['value'] <= \
output['n']['value'] else 'n'
output['best'] = output[max_type]
output['best']['carrier_type'] = max_type
return output
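    # Illustrative call (editor's sketch):
    #   best = an.get_extreme("zt", max_temp=600,
    #                         min_doping=1e18, max_doping=1e20)
    #   best['best']  # {'value', 'temperature', 'doping', 'isotropic',
    #                 #  'carrier_type'} for the optimal p- or n-type point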
@staticmethod
def _format_to_output(tensor, tensor_doping, output, doping_levels,
multi=1.0):
if doping_levels:
full_tensor = tensor_doping
result = {doping: {t: [] for t in tensor_doping[doping]} for doping
in tensor_doping}
for doping in full_tensor:
for temp in full_tensor[doping]:
for i in range(len(full_tensor[doping][temp])):
if output in ['eig', 'eigs']:
result[doping][temp].append(sorted(
np.linalg.eigh(full_tensor[doping][temp][i])[
0] * multi))
elif output == 'tensor':
result[doping][temp].append(
np.array(full_tensor[doping][temp][i]) * multi)
elif output == 'average':
result[doping][temp].append(
(full_tensor[doping][temp][i][0][0]
+ full_tensor[doping][temp][i][1][1]
+ full_tensor[doping][temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: "
"{}".format(output))
else:
full_tensor = tensor
result = {t: [] for t in tensor}
for temp in full_tensor:
for i in range(len(tensor[temp])):
if output in ['eig', 'eigs']:
result[temp].append(sorted(
np.linalg.eigh(full_tensor[temp][i])[0] * multi))
elif output == 'tensor':
result[temp].append(
np.array(full_tensor[temp][i]) * multi)
elif output == 'average':
result[temp].append((full_tensor[temp][i][0][0]
+ full_tensor[temp][i][1][1]
+ full_tensor[temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: {}".
format(output))
return result
def get_complete_dos(self, structure, analyzer_for_second_spin=None):
"""
Gives a CompleteDos object with the DOS from the interpolated
projected band structure
        Args:
            structure: the structure (necessary to identify sites
                for projection)
            analyzer_for_second_spin: must be specified to have a
                CompleteDos with both Spin components
Returns:
a CompleteDos object
Example of use in case of spin polarized case:
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=1).run(path_dir='dos_up/')
an_up=BoltztrapAnalyzer.from_files("dos_up/boltztrap/",dos_spin=1)
BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=-1).run(path_dir='dos_dw/')
an_dw=BoltztrapAnalyzer.from_files("dos_dw/boltztrap/",dos_spin=-1)
cdos=an_up.get_complete_dos(bs.structure,an_dw)
"""
pdoss = {}
spin_1 = list(self.dos.densities.keys())[0]
if analyzer_for_second_spin:
if not np.all(self.dos.energies ==
analyzer_for_second_spin.dos.energies):
raise BoltztrapError(
"Dos merging error: energies of the two dos are different")
spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]
if spin_1 == spin_2:
raise BoltztrapError(
"Dos merging error: spin component are the same")
for s in self._dos_partial:
if structure.sites[int(s)] not in pdoss:
pdoss[structure.sites[int(s)]] = {}
for o in self._dos_partial[s]:
if Orbital[o] not in pdoss[structure.sites[int(s)]]:
pdoss[structure.sites[int(s)]][Orbital[o]] = {}
pdoss[structure.sites[int(s)]][Orbital[o]][
spin_1] = self._dos_partial[s][o]
if analyzer_for_second_spin:
pdoss[structure.sites[int(s)]][Orbital[o]][
spin_2] = analyzer_for_second_spin._dos_partial[s][o]
if analyzer_for_second_spin:
tdos = Dos(self.dos.efermi, self.dos.energies,
{spin_1: self.dos.densities[spin_1],
spin_2: analyzer_for_second_spin.dos.densities[
spin_2]})
else:
tdos = self.dos
return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)
def get_mu_bounds(self, temp=300):
"""
:param temp: Temperature.
:return: The chemical potential bounds at that temperature.
"""
return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]
for temp in self._carrier_conc}
def get_hall_carrier_concentration(self):
"""
        gives the Hall carrier concentration (in cm^-3). This is derived from
        the trace of the Hall tensor (see the BoltzTraP source code); the Hall
        carrier concentration is not always exactly the same as the carrier
        concentration.
Returns
a dictionary {temp:[]} with an array of Hall carrier concentration
(in cm^-3) at each temperature The array relates to each step of
electron chemical potential
"""
result = {temp: [] for temp in self._hall}
for temp in self._hall:
for i in self._hall[temp]:
trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0
if trace != 0.0:
result[temp].append(1e-6 / (trace * constants.e))
else:
result[temp].append(0.0)
return result
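    # Editor's note: the loop above implements n_H = 1/(e * R_H), with R_H
    # averaged over three off-diagonal Hall-tensor components; the 1e-6
    # factor converts the SI m^-3 result to cm^-3.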
@staticmethod
def parse_outputtrans(path_dir):
"""
Parses .outputtrans file
Args:
path_dir: dir containing boltztrap.outputtrans
Returns:
tuple - (run_type, warning, efermi, gap, doping_levels)
"""
run_type = None
warning = None
efermi = None
gap = None
doping_levels = []
with open(os.path.join(path_dir, "boltztrap.outputtrans"), 'r') \
as f:
for line in f:
if "WARNING" in line:
warning = line
elif "Calc type:" in line:
run_type = line.split()[-1]
elif line.startswith("VBM"):
efermi = Energy(line.split()[1], "Ry").to("eV")
elif line.startswith("Egap:"):
gap = Energy(float(line.split()[1]), "Ry").to("eV")
elif line.startswith("Doping level number"):
doping_levels.append(float(line.split()[6]))
return run_type, warning, efermi, gap, doping_levels
@staticmethod
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):
"""
Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files
Args:
path_dir: (str) dir containing DOS files
efermi: (float) Fermi energy
dos_spin: (int) -1 for spin down, +1 for spin up
trim_dos: (bool) whether to post-process / trim DOS
Returns:
tuple - (DOS, dict of partial DOS)
"""
data_dos = {'total': [], 'partial': {}}
# parse the total DOS data
# format is energy, DOS, integrated DOS
with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f:
count_series = 0 # TODO: why is count_series needed?
for line in f:
if line.lstrip().startswith("#"):
count_series += 1
if count_series > 1:
break
else:
data_dos['total'].append(
[Energy(float(line.split()[0]), "Ry").to("eV"),
float(line.split()[1])])
lw_l = 0
hg_l = -len(data_dos['total'])
if trim_dos:
# Francesco knows what this does
# It has something to do with a trick of adding fake energies
# at the endpoints of the DOS, and then re-trimming it. This is
# to get the same energy scale for up and down spin DOS.
tmp_data = np.array(data_dos['total'])
tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]
lw_l = len(tmp_data[:, 1]) - len(tmp_den)
tmp_ene = tmp_data[lw_l:, 0]
tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]
hg_l = len(tmp_ene) - len(tmp_den)
tmp_ene = tmp_ene[:-hg_l]
tmp_data = np.vstack((tmp_ene, tmp_den)).T
data_dos['total'] = tmp_data.tolist()
# parse partial DOS data
for file_name in os.listdir(path_dir):
if file_name.endswith(
"transdos") and file_name != 'boltztrap.transdos':
tokens = file_name.split(".")[1].split("_")
site = tokens[1]
orb = '_'.join(tokens[2:])
with open(os.path.join(path_dir, file_name), 'r') as f:
for line in f:
if not line.lstrip().startswith(" #"):
if site not in data_dos['partial']:
data_dos['partial'][site] = {}
if orb not in data_dos['partial'][site]:
data_dos['partial'][site][orb] = []
data_dos['partial'][site][orb].append(
float(line.split()[1]))
data_dos['partial'][site][orb] = data_dos['partial'][site][
orb][lw_l:-hg_l]
dos_full = {'energy': [], 'density': []}
for t in data_dos['total']:
dos_full['energy'].append(t[0])
dos_full['density'].append(t[1])
dos = Dos(efermi, dos_full['energy'],
{Spin(dos_spin): dos_full['density']})
dos_partial = data_dos['partial'] # TODO: make this real DOS object?
return dos, dos_partial
@staticmethod
def parse_intrans(path_dir):
"""
Parses boltztrap.intrans mainly to extract the value of scissor applied
to the bands or some other inputs
Args:
path_dir: (str) dir containing the boltztrap.intrans file
Returns:
intrans (dict): a dictionary containing various inputs that had
been used in the Boltztrap run.
"""
intrans = {}
with open(os.path.join(path_dir, "boltztrap.intrans"), 'r') as f:
for line in f:
if "iskip" in line:
intrans["scissor"] = Energy(float(line.split(" ")[3]),
"Ry").to("eV")
if "HISTO" in line or "TETRA" in line:
intrans["dos_type"] = line[:-1]
return intrans
@staticmethod
def parse_struct(path_dir):
"""
Parses boltztrap.struct file (only the volume)
Args:
path_dir: (str) dir containing the boltztrap.struct file
Returns:
(float) volume
"""
with open(os.path.join(path_dir, "boltztrap.struct"), 'r') as f:
tokens = f.readlines()
return Lattice([[Length(float(tokens[i].split()[j]), "bohr").
to("ang") for j in range(3)] for i in
range(1, 4)]).volume
@staticmethod
def parse_cond_and_hall(path_dir, doping_levels=None):
"""
Parses the conductivity and Hall tensors
Args:
path_dir: Path containing .condtens / .halltens files
doping_levels: ([float]) - doping lvls, parse outtrans to get this
Returns:
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc
"""
# Step 1: parse raw data but do not convert to final format
t_steps = set()
mu_steps = set()
data_full = []
data_hall = []
data_doping_full = []
data_doping_hall = []
doping_levels = doping_levels or []
# parse the full conductivity/Seebeck/kappa0/etc data
# also initialize t_steps and mu_steps
with open(os.path.join(path_dir, "boltztrap.condtens"), 'r') as f:
for line in f:
if not line.startswith("#"):
mu_steps.add(float(line.split()[0]))
t_steps.add(int(float(line.split()[1])))
data_full.append([float(c) for c in line.split()])
# parse the full Hall tensor
with open(os.path.join(path_dir, "boltztrap.halltens"), 'r') as f:
for line in f:
if not line.startswith("#"):
data_hall.append([float(c) for c in line.split()])
if len(doping_levels) != 0:
# parse doping levels version of full cond. tensor, etc.
with open(
os.path.join(path_dir, "boltztrap.condtens_fixdoping"),
'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_full.append([float(c)
for c in line.split()])
# parse doping levels version of full hall tensor
with open(
os.path.join(path_dir, "boltztrap.halltens_fixdoping"),
'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_hall.append(
[float(c) for c in line.split()])
# Step 2: convert raw data to final format
# sort t and mu_steps (b/c they are sets not lists)
# and convert to correct energy
t_steps = sorted([t for t in t_steps])
mu_steps = sorted([Energy(m, "Ry").to("eV") for m in mu_steps])
# initialize output variables - could use defaultdict instead
# I am leaving things like this for clarity
cond = {t: [] for t in t_steps}
seebeck = {t: [] for t in t_steps}
kappa = {t: [] for t in t_steps}
hall = {t: [] for t in t_steps}
carrier_conc = {t: [] for t in t_steps}
mu_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
seebeck_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
cond_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
kappa_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
hall_doping = {'p': {t: [] for t in t_steps},
'n': {t: [] for t in t_steps}}
# process doping levels
pn_doping_levels = {'p': [], 'n': []}
for d in doping_levels:
if d > 0:
pn_doping_levels['p'].append(d)
else:
pn_doping_levels['n'].append(-d)
# process raw conductivity data, etc.
for d in data_full:
temp, doping = d[1], d[2]
carrier_conc[temp].append(doping)
cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())
# process raw Hall data
for d in data_hall:
temp, doping = d[1], d[2]
hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),
np.reshape(d[12:21], (3, 3)).tolist(),
np.reshape(d[21:30], (3, 3)).tolist()]
hall[temp].append(hall_tens)
# process doping conductivity data, etc.
for d in data_doping_full:
temp, doping, mu = d[0], d[1], d[-1]
pn = 'p' if doping > 0 else 'n'
mu_doping[pn][temp].append(Energy(mu, "Ry").to("eV"))
cond_doping[pn][temp].append(
np.reshape(d[2:11], (3, 3)).tolist())
seebeck_doping[pn][temp].append(
np.reshape(d[11:20], (3, 3)).tolist())
kappa_doping[pn][temp].append(
np.reshape(d[20:29], (3, 3)).tolist())
# process doping Hall data
for d in data_doping_hall:
temp, doping, mu = d[0], d[1], d[-1]
pn = 'p' if doping > 0 else 'n'
hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),
np.reshape(d[11:20], (3, 3)).tolist(),
np.reshape(d[20:29], (3, 3)).tolist()]
hall_doping[pn][temp].append(hall_tens)
return (mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc)
@staticmethod
def from_files(path_dir, dos_spin=1):
"""
get a BoltztrapAnalyzer object from a set of files
Args:
path_dir: directory where the boltztrap files are
dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down
Returns:
a BoltztrapAnalyzer object
"""
run_type, warning, efermi, gap, doping_levels = \
BoltztrapAnalyzer.parse_outputtrans(path_dir)
vol = BoltztrapAnalyzer.parse_struct(path_dir)
intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
if run_type == "BOLTZ":
dos, pdos = BoltztrapAnalyzer.parse_transdos(
path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
(mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping,
seebeck_doping, cond_doping, kappa_doping, hall_doping,
carrier_conc) = BoltztrapAnalyzer.parse_cond_and_hall(path_dir, doping_levels)
return BoltztrapAnalyzer(
gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)
elif run_type == "DOS":
            trim = intrans["dos_type"] == "HISTO"
dos, pdos = BoltztrapAnalyzer.parse_transdos(
path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)
return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,
warning=warning, vol=vol)
elif run_type == "BANDS":
bz_kpoints = np.loadtxt(
os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:]
bz_bands = np.loadtxt(
os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6]
return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,
warning=warning, vol=vol)
elif run_type == "FERMI":
"""
"""
if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):
fs_data = read_cube_file(
os.path.join(path_dir, 'boltztrap_BZ.cube'))
elif os.path.exists(os.path.join(path_dir, 'fort.30')):
fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))
else:
raise BoltztrapError("No data file found for fermi surface")
return BoltztrapAnalyzer(fermi_surface_data=fs_data)
else:
raise ValueError("Run type: {} not recognized!".format(run_type))
def as_dict(self):
"""
:return: MSONable dict.
"""
results = {'gap': self.gap,
'mu_steps': self.mu_steps,
'intrans': self.intrans,
'cond': self._cond,
'seebeck': self._seebeck,
'kappa': self._kappa,
'hall': self._hall,
'doping': self.doping,
'mu_doping': self.mu_doping,
'seebeck_doping': self._seebeck_doping,
'cond_doping': self._cond_doping,
'kappa_doping': self._kappa_doping,
'hall_doping': self._hall_doping,
'dos': self.dos.as_dict(),
'dos_partial': self._dos_partial,
'carrier_conc': self._carrier_conc,
'vol': self.vol,
'warning': self.warning}
return jsanitize(results)
@staticmethod
def from_dict(data):
"""
:param data: Dict representation.
:return: BoltztrapAnalyzer
"""
def _make_float_array(a):
res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
for i in range(3):
for j in range(3):
res[i][j] = float(a[i][j])
return res
        def _make_float_hall(a):
            return list(a[:27])
gap = data.get('gap')
mu_steps = [float(d) for d in data['mu_steps']] if \
'mu_steps' in data else None
cond = {int(d): [_make_float_array(v) for v in data['cond'][d]]
for d in data['cond']} if 'cond' in data else None
seebeck = {int(d): [_make_float_array(v) for v in data['seebeck'][d]]
for d in data['seebeck']} if 'seebeck' in data else None
kappa = {int(d): [_make_float_array(v) for v in data['kappa'][d]]
for d in data['kappa']} if 'kappa' in data else None
hall = {int(d): [_make_float_hall(v) for v in data['hall'][d]]
for d in data['hall']} if 'hall' in data else None
doping = {'p': [float(d) for d in data['doping']['p']],
'n': [float(d) for d in data['doping']['n']]} if \
'doping' in data else None
mu_doping = {'p': {int(d): [
float(v) for v in data['mu_doping']['p'][d]] for d in
data['mu_doping']['p']}, 'n':
{int(d): [float(v) for v in data['mu_doping']['n'][d]]
for d in data['mu_doping'][
'n']}} if 'mu_doping' in data else None
seebeck_doping = {'p': {int(d): [
_make_float_array(v) for v in data['seebeck_doping']['p'][d]]
for d in data['seebeck_doping']['p']}, 'n':
{int(d): [_make_float_array(v) for v in
data['seebeck_doping']['n'][d]] for d in
data['seebeck_doping'][
'n']}} if 'seebeck_doping' in data \
else None
cond_doping = {'p': {int(d): [_make_float_array(v) for v in data['cond_doping']['p'][d]]
for d in data['cond_doping']['p']},
'n': {int(d): [_make_float_array(v) for v in data['cond_doping']['n'][d]]
for d in data['cond_doping']['n']}} if 'cond_doping' in data else None
kappa_doping = {'p': {int(d): [_make_float_array(v) for v in data['kappa_doping']['p'][d]]
for d in data['kappa_doping']['p']},
'n': {int(d): [_make_float_array(v) for v in data['kappa_doping']['n'][d]]
for d in data['kappa_doping']['n']}} if 'kappa_doping' in data else None
hall_doping = {'p': {int(d): [_make_float_hall(v) for v in data['hall_doping']['p'][d]]
for d in data['hall_doping']['p']},
'n': {int(d): [_make_float_hall(v) for v in data['hall_doping']['n'][d]]
for d in data['hall_doping']['n']}} if "hall_doping" in data else None
dos = Dos.from_dict(data['dos']) if 'dos' in data else None
dos_partial = data.get('dos_partial')
carrier_conc = data.get('carrier_conc')
vol = data.get('vol')
warning = data.get('warning')
return BoltztrapAnalyzer(gap=gap,
mu_steps=mu_steps,
cond=cond,
seebeck=seebeck,
kappa=kappa,
hall=hall,
doping=doping,
mu_doping=mu_doping,
seebeck_doping=seebeck_doping,
cond_doping=cond_doping,
kappa_doping=kappa_doping,
hall_doping=hall_doping,
dos=dos,
dos_partial=dos_partial,
carrier_conc=carrier_conc,
vol=vol,
warning=warning)
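# Round-trip sketch (editor's illustration):
#   d = an.as_dict()                      # JSON-sanitized via jsanitize
#   an2 = BoltztrapAnalyzer.from_dict(d)  # rebuilds an equivalent analyzer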
def read_cube_file(filename):
"""
:param filename: Cube filename
:return: Energy data.
"""
with open(filename, 'rt') as f:
natoms = 0
count_line = 0
for line in f:
line = line.rstrip("\n")
if count_line == 0 and "CUBE" not in line:
raise ValueError("CUBE file format not recognized")
if count_line == 2:
tokens = line.split()
natoms = int(tokens[0])
if count_line == 3:
tokens = line.split()
n1 = int(tokens[0])
elif count_line == 4:
tokens = line.split()
n2 = int(tokens[0])
elif count_line == 5:
tokens = line.split()
n3 = int(tokens[0])
elif count_line > 5:
break
count_line += 1
if 'fort.30' in filename:
energy_data = np.genfromtxt(filename, skip_header=natoms + 6, skip_footer=1)
nlines_data = len(energy_data)
last_line = np.genfromtxt(filename, skip_header=nlines_data + natoms + 6)
energy_data = np.append(energy_data.flatten(), last_line).reshape(n1, n2, n3)
elif 'boltztrap_BZ.cube' in filename:
energy_data = np.loadtxt(filename, skiprows=natoms + 6).reshape(n1, n2, n3)
energy_data /= Energy(1, "eV").to("Ry")
return energy_data
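# Example (editor's sketch):
#   fs_data = read_cube_file('boltztrap_BZ.cube')
#   # -> (n1, n2, n3) ndarray of energies on the BZ grid, rescaled by the
#   # final division above (presumably Ry -> eV)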
def compare_sym_bands(bands_obj, bands_ref_obj, nb=None):
"""
Compute the mean of correlation between bzt and vasp bandstructure on
sym line, for all bands and locally (for each branches) the difference
squared (%) if nb is specified.
"""
if bands_ref_obj.is_spin_polarized:
nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)
else:
# TODO: why is this needed? Shouldn't pmg take care of nb_bands?
nbands = min(len(bands_obj.bands[Spin.up]),
len(bands_ref_obj.bands[Spin.up]))
    arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])
if bands_ref_obj.is_spin_polarized:
arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])
arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])
arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))
arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]
else:
arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])
corr = np.array(
[distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in
range(nbands)])
    if isinstance(nb, int):
        nb = [nb]
    bcheck = {}
    if nb is not None and max(nb) < nbands:
branches = [[s['start_index'], s['end_index'], s['name']] for s in
bands_ref_obj.branches]
if not bands_obj.is_metal() and not bands_ref_obj.is_metal():
zero_ref = bands_ref_obj.get_vbm()['energy']
zero = bands_obj.get_vbm()['energy']
if not zero:
vbm = bands_ref_obj.get_vbm()['band_index'][Spin.up][-1]
zero = max(arr_bands[vbm])
else:
zero_ref = 0 # bands_ref_obj.efermi
zero = 0 # bands_obj.efermi
for nbi in nb:
bcheck[nbi] = {}
bcheck[nbi]['Dist'] = np.mean(abs(arr_bands[nbi] - zero
- arr_bands_ref[nbi] + zero_ref))
bcheck[nbi]['Corr'] = corr[nbi]
for start, end, name in branches:
bcheck[nbi][name] = np.mean(abs(arr_bands[nbi][start:end + 1]
- zero
- arr_bands_ref[nbi][
start:end + 1] + zero_ref))
else:
bcheck = "No nb given"
return bcheck
def seebeck_spb(eta, Lambda=0.5):
"""
Seebeck analytic formula in the single parabolic model
"""
from fdint import fdk
return constants.k / constants.e * ((2. + Lambda) * fdk(1. + Lambda, eta) /
((1. + Lambda) * fdk(Lambda, eta)) - eta) * 1e+6
def eta_from_seebeck(seeb, Lambda):
"""
    It takes a value of Seebeck and solves for the point where the analytic Seebeck equals it.
Returns: eta where the two seebeck coefficients are equal
(reduced chemical potential)
"""
from scipy.optimize import fsolve
out = fsolve(lambda x: (seebeck_spb(x, Lambda) - abs(seeb)) ** 2, 1., full_output=True)
return out[0][0]
def seebeck_eff_mass_from_carr(eta, n, T, Lambda):
"""
Calculate seebeck effective mass at a certain carrier concentration
eta in kB*T units, n in cm-3, T in K, returns mass in m0 units
"""
try:
from fdint import fdk
except ImportError:
raise BoltztrapError("fdint module not found. Please, install it.\n" +
"It is needed to calculate Fermi integral quickly.")
return (2 * np.pi ** 2 * abs(n) * 10 ** 6 / (fdk(0.5, eta))) ** (2. / 3) / \
(2 * constants.m_e * constants.k * T / (constants.h / 2 / np.pi) ** 2)
def seebeck_eff_mass_from_seebeck_carr(seeb, n, T, Lambda):
"""
Find the chemical potential where analytic and calculated seebeck are identical
and then calculate the seebeck effective mass at that chemical potential and
a certain carrier concentration n
"""
eta = eta_from_seebeck(seeb, Lambda)
mass = seebeck_eff_mass_from_carr(eta, n, T, Lambda)
return mass
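# Worked chain (editor's sketch with made-up values): for a Seebeck of
# 150 microV/K at n = 1e19 cm^-3 and T = 300 K,
#   eta = eta_from_seebeck(150, Lambda=0.5)
#   mass = seebeck_eff_mass_from_carr(eta, 1e19, 300, 0.5)
# gives the same result as seebeck_eff_mass_from_seebeck_carr(150, 1e19, 300, 0.5).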
|
mbkumar/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 107,107
|
[
"BoltzTrap",
"VASP",
"pymatgen"
] |
e7ebf258456451eb3477d411fecd146d584999214bc84548448476cbf2cf7f39
|
#
# OldFSSource.py - Old-style basic file-system data source
# Copyright (C) 2004 - 2009 Tony Garnock-Jones <tonyg@kcbbs.gen.nz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import Gyre
import os
import string
import rfc822
class OldFSSource:
def __init__(self, contentdir):
self.contentdir = contentdir
def _visit_story(self, dirname, name):
filepath = os.path.join(dirname, name + '.' + Gyre.config.file_extension)
try:
s = os.stat(filepath)
except OSError:
return
story = Gyre.Entity(Gyre.config.protostory)
story.mtime = s.st_mtime
f = open(filepath)
headers = rfc822.Message(f)
for (key, val) in headers.items(): setattr(story, key.lower(), val.decode('utf-8'))
body = f.read().decode('utf-8')
f.close()
story.mtime = int(story.mtime)
categorystr = dirname[len(self.contentdir) + 1:]
if categorystr:
story.category = string.split(categorystr, '/')
else:
story.category = []
story.body = body
uid = list(story.category)
uid.append(name)
story.id = string.join(uid, '/')
story.source = self
Gyre.config.store.update(story)
def updateStore(self):
def visit(arg, dirname, names):
for name in names:
if name.endswith('.' + Gyre.config.file_extension):
choplen = len(Gyre.config.file_extension) + 1
self._visit_story(dirname, name[:-choplen])
os.path.walk(self.contentdir, visit, None)
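# Typical wiring (editor's sketch; assumes Gyre.config is already set up with
# file_extension, protostory and a store):
#   source = OldFSSource('/path/to/content')
#   source.updateStore()  # walks contentdir and pushes each story to the store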
|
tonyg/gyre
|
OldFSSource.py
|
Python
|
gpl-2.0
| 2,265
|
[
"VisIt"
] |
1a0eb178bb508857e1f11c2db6e2675d0a64bb398d2b7a15897cefbe3929256f
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from nova.api.openstack.placement import policy as placement_policy
import nova.conf
from nova.conf import paths
from nova import policies
import nova.policy
from nova.tests.unit import fake_policy
CONF = nova.conf.CONF
class RealPolicyFixture(fixtures.Fixture):
"""Load the live policy for tests.
A base policy fixture that starts with the assumption that you'd
like to load and enforce the shipped default policy in tests.
Provides interfaces to tinker with both the contents and location
of the policy file before loading to allow overrides. To do this
implement ``_prepare_policy`` in the subclass, and adjust the
``policy_file`` accordingly.
"""
def _prepare_policy(self):
"""Allow changing of the policy before we get started"""
pass
def setUp(self):
super(RealPolicyFixture, self).setUp()
# policy_file can be overridden by subclasses
self.policy_file = paths.state_path_def('etc/nova/policy.json')
self._prepare_policy()
CONF.set_override('policy_file', self.policy_file, group='oslo_policy')
nova.policy.reset()
nova.policy.init()
self.addCleanup(nova.policy.reset)
def set_rules(self, rules, overwrite=True):
policy = nova.policy._ENFORCER
policy.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
        The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
for rule in policies.list_rules():
# NOTE(lbragstad): Only write the rule if it isn't already in the
# rule set and if it isn't deprecated. Otherwise we're just going
# to spam test runs with deprecate policy warnings.
if rule.name not in rules and not rule.deprecated_for_removal:
rules[rule.name] = rule.check_str
class PolicyFixture(RealPolicyFixture):
"""Load a fake policy from nova.tests.unit.fake_policy
This overrides the policy with a completely fake and synthetic
policy file.
NOTE(sdague): the use of this is deprecated, and we should unwind
the tests so that they can function with the real policy. This is
mostly legacy because our default test instances and default test
contexts don't match up. It appears that in many cases fake_policy
was just modified to whatever makes tests pass, which makes it
dangerous to be used in tree. Long term a NullPolicy fixture might
be better in those cases.
"""
def _prepare_policy(self):
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path,
'policy.json')
# load the fake_policy data and add the missing default rules.
policy_rules = jsonutils.loads(fake_policy.policy_data)
self.add_missing_default_rules(policy_rules)
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy_rules, f)
CONF.set_override('policy_dirs', [], group='oslo_policy')
class RoleBasedPolicyFixture(RealPolicyFixture):
"""Load a modified policy which allows all actions only by a single role.
This fixture can be used for testing role based permissions as it
provides a version of the policy which stomps over all previous
declaration and makes every action only available to a single
role.
"""
def __init__(self, role="admin", *args, **kwargs):
super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
self.role = role
def _prepare_policy(self):
# Convert all actions to require the specified role
policy = {}
for rule in policies.list_rules():
policy[rule.name] = 'role:%s' % self.role
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy, f)
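# Illustrative use in a test (editor's sketch; assumes a testtools-style
# TestCase with fixture support):
#   self.useFixture(RoleBasedPolicyFixture(role='member'))
#   # every policy action now requires role:member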
class PlacementPolicyFixture(fixtures.Fixture):
"""Load the default placement policy for tests.
This fixture requires nova.tests.unit.conf_fixture.ConfFixture.
"""
def setUp(self):
super(PlacementPolicyFixture, self).setUp()
policy_file = paths.state_path_def('etc/nova/placement-policy.yaml')
CONF.set_override('policy_file', policy_file, group='placement')
placement_policy.reset()
placement_policy.init()
self.addCleanup(placement_policy.reset)
@staticmethod
def set_rules(rules, overwrite=True):
"""Set placement policy rules.
.. note:: The rules must first be registered via the
Enforcer.register_defaults method.
:param rules: dict of action=rule mappings to set
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
enforcer = placement_policy.get_enforcer()
enforcer.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)
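# Illustrative use (editor's sketch; the rule name is hypothetical and must
# already be registered in the enforcer defaults):
#   self.useFixture(PlacementPolicyFixture())
#   PlacementPolicyFixture.set_rules({'placement:some_rule': '!'})  # deny it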
|
mikalstill/nova
|
nova/tests/unit/policy_fixture.py
|
Python
|
apache-2.0
| 6,072
|
[
"TINKER"
] |
7913f1d2340bc9fac71e8dc7326cbf4386899b2eb973dce74252945bd625a96d
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides templates which allow variable sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
__all__ = ["make_template"]
def make_template(name_, func_, create_scope_now_=False, unique_name_=None,
**kwargs):
"""Given an arbitrary function, wrap it so that it does variable sharing.
This wraps `func_` in a Template and partially evaluates it. Templates are
functions that create variables the first time they are called and reuse them
thereafter. In order for `func_` to be compatible with a `Template` it must
have the following properties:
* The function should create all trainable variables and any variables that
should be reused by calling `tf.get_variable`. If a trainable variable is
created using `tf.Variable`, then a ValueError will be thrown. Variables
that are intended to be locals can be created by specifying
    `tf.Variable(..., trainable=False)`.
* The function may use variable scopes and other templates internally to
create and reuse variables, but it shouldn't use `tf.all_variables` to
capture variables that are defined outside of the scope of the function.
* Internal scopes and variable names should not depend on any arguments that
are not supplied to `make_template`. In general you will get a ValueError
telling you that you are trying to reuse a variable that doesn't exist
if you make a mistake.
In the following example, both `z` and `w` will be scaled by the same `y`. It
is important to note that if we didn't assign `scalar_name` and used a
different name for z and w that a `ValueError` would be thrown because it
couldn't reuse the variable.
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
```
As a safe-guard, the returned function will raise a `ValueError` after the
first call if trainable variables are created by calling `tf.Variable`.
If all of these are true, then 2 properties are enforced by the template:
1. Calling the same template multiple times will share all non-local
variables.
2. Two different templates are guaranteed to be unique, unless you reenter the
same variable scope as the initial definition of a template and redefine
it. An examples of this exception:
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
with tf.variable_scope('scope') as vs:
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
# Creates a template that reuses the variables above.
with tf.variable_scope(vs, reuse=True):
scale_by_y2 = tf.make_template('scale_by_y', my_op, scalar_name='y')
z2 = scale_by_y2(input1)
w2 = scale_by_y2(input2)
```
Depending on the value of `create_scope_now_`, the full variable scope may be
captured either at the time of first call or at the time of construction. If
this option is set to True, then all Tensors created by repeated calls to the
template will have an extra trailing _N+1 to their name, as the first time the
scope is entered in the Template constructor no Tensors are created.
Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to
reduce the likelihood of collisions with kwargs.
Args:
name_: A name for the scope created by this template. If necessary, the name
will be made unique by appending `_N` to the name.
func_: The function to wrap.
create_scope_now_: Boolean controlling whether the scope should be created
when the template is constructed or when the template is called. Default
is False, meaning the scope is created when the template is called.
unique_name_: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None.
**kwargs: Keyword arguments to apply to `func_`.
Returns:
A function to encapsulate a set of variables which should be created once
    and reused. An enclosing scope will be created, either where `make_template`
is called, or wherever the result is called, depending on the value of
`create_scope_now_`. Regardless of the value, the first time the template
is called it will enter the scope with no reuse, and call `func_` to create
variables, which are guaranteed to be unique. All subsequent calls will
re-enter the scope and reuse those variables.
Raises:
ValueError: if the name is None.
"""
if kwargs:
func_ = functools.partial(func_, **kwargs)
return Template(
name_, func_, create_scope_now=create_scope_now_,
unique_name=unique_name_)
def _skip_common_stack_elements(stacktrace, base_case):
"""Skips items that the target stacktrace shares with the base stacktrace."""
for i, (trace, base) in enumerate(zip(stacktrace, base_case)):
if trace != base:
return stacktrace[i:]
return stacktrace[-1:]
class Template(object):
"""Wrap a function to aid in variable sharing.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
call. If `create_scope_now_` is passed as True to the constructor, the full
scope will be captured there, but no variables will created until the first
call.
"""
def __init__(self, name, func, create_scope_now=False, unique_name=None):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The
          name will be made unique by appending `_N` to it (see how
`tf.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
time, rather than first call. Defaults to false. Creating the scope at
          construction time may be more convenient if the template is to be passed
through much lower level code, and you want to be sure of the scope
name without knowing exactly where it will be first called. If set to
True, the scope will be created in the constructor, and all subsequent
times in __call__, leading to a trailing numeral being added to the
names of all created Tensors. If set to False, the scope will be created
at the first call location.
unique_name: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is
false, an error is raised. Defaults to None.
Raises:
ValueError: if the name is None.
"""
self._func = func
self._stacktrace = traceback.format_stack()[:-2]
self._name = name
self._unique_name = unique_name
if name is None:
raise ValueError("name cannot be None.")
if create_scope_now:
with variable_scope.variable_scope(
self._unique_name, self._name) as vs:
self._var_scope = vs
else:
self._var_scope = None
# This variable keeps track of whether the template has been called yet,
# which is not the same as whether the scope has been created.
self._variables_created = False
def _call_func(self, args, kwargs, check_for_new_variables):
try:
vars_at_start = len(ops.get_collection(ops.GraphKeys.VARIABLES))
trainable_at_start = len(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
result = self._func(*args, **kwargs)
if check_for_new_variables:
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if trainable_at_start != len(trainable_variables):
raise ValueError("Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" %
(trainable_variables[trainable_at_start:],))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = ops.get_collection(ops.GraphKeys.VARIABLES)
if vars_at_start != len(variables):
logging.info("New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s",
variables[vars_at_start:])
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
if self._var_scope:
if self._variables_created:
# This is not the first visit to __call__, so variables have already
# been created, and we want to reuse them.
with variable_scope.variable_scope(self._var_scope, reuse=True):
return self._call_func(args, kwargs, check_for_new_variables=True)
else:
# This is the first visit to __call__, but the scope has already been
# created in the constructor. Set _variables_created so that subsequent
# calls take the if branch above.
self._variables_created = True
with variable_scope.variable_scope(self._var_scope):
return self._call_func(args, kwargs, check_for_new_variables=False)
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
self._variables_created = True
with variable_scope.variable_scope(
self._unique_name, self._name) as vs:
self._var_scope = vs
return self._call_func(args, kwargs, check_for_new_variables=False)
@property
def var_scope(self):
"""Returns the variable scope object created by this Template."""
return self._var_scope
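# Hedged usage sketch (not part of the original module): Templates are
# normally obtained via make_template, but direct construction behaves the
# same. `my_op` is a hypothetical variable-creating function.
#
#   tmpl = Template("scale", my_op)   # scope captured at first call
#   y = tmpl(x1)                      # first call: creates the variables
#   z = tmpl(x2)                      # reuses them; a new trainable variable
#                                     # created here would raise ValueError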
|
tongwang01/tensorflow
|
tensorflow/python/ops/template.py
|
Python
|
apache-2.0
| 11,931
|
[
"VisIt"
] |
89511314e7180a82fcfe1aeafd7eea8ccd9a9eccdd9a1994b25caf5bdc573c84
|
"""
StdoutBackend wrapper
"""
__RCSID__ = "$Id$"
import logging
import sys
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.ColoredBaseFormatter import ColoredBaseFormatter
class StdoutBackend(AbstractBackend):
"""
StdoutBackend is used to create an abstraction of the handler and the formatter concepts from logging.
    Here, we gather a StreamHandler object and a ColoredBaseFormatter.
    - StreamHandler is from the standard logging library: it is used to write log messages to a desired
      stream, so it needs one: here it is stdout.
    - ColoredBaseFormatter is a custom Formatter object, created for DIRAC in order to get the appropriate
      display with color.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
super(StdoutBackend, self).__init__(None, ColoredBaseFormatter)
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
        :param parameters: dictionary of parameters, e.g. {'FileName': 'file.log'}
"""
self._handler = logging.StreamHandler(sys.stdout)
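# Hedged usage sketch (not part of the original file): attaching the handler
# created by this backend to a plain logger. Any further configuration hooks
# on AbstractBackend are not assumed here.
#
#   backend = StdoutBackend()
#   backend.createHandler()
#   logging.getLogger("demo").addHandler(backend._handler)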
|
Andrew-McNab-UK/DIRAC
|
Resources/LogBackends/StdoutBackend.py
|
Python
|
gpl-3.0
| 1,201
|
[
"DIRAC"
] |
2231b84a04a655d4bfe94ac15f0c335318bb7aeb5a34b7d6a53a5f181e0ea95d
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SurveyQuestionResponse.visit'
db.add_column(u'survey_surveyquestionresponse', 'visit',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clinics.Visit'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SurveyQuestionResponse.visit'
db.delete_column(u'survey_surveyquestionresponse', 'visit_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'ordering': "['order', 'id']", 'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'designation': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '8'}),
'for_display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']", 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('phone', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
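# Hedged usage note (not part of the original migration): with South
# installed, this schema change is applied with
#
#   python manage.py migrate survey
#
# which runs forwards(); backwards() runs when migrating back to an
# earlier migration number.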
|
myvoice-nigeria/myvoice
|
myvoice/survey/migrations/0003_auto__add_field_surveyquestionresponse_visit.py
|
Python
|
bsd-2-clause
| 13,605
|
[
"VisIt"
] |
23b494e75c0fcff7fe193a57db6bdbdce98c0434f5ef6828e43087124bf84d4c
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
import rapidsms
from rapidsms.parsers import Matcher
from persistance.models import *
from models import *
from locations.models import *
from tags.models import *
from people.models import *
from rwanda.models import *
from rwanda.utils import *
class App(rapidsms.App):
def parse(self, msg):
msg.text = msg.text.replace(".", " ")
def handle(self, msg):
if msg.text.strip() == "":
msg.error("Your message was empty. You must enter some text.")
return True
def catch(self, msg):
if not msg.responses:
msg.error("Sorry, we could not understand that message.")
return True
class Appx(object):
MSG = {
"en": {
"bad-alias": "Sorry, I don't know anyone by that name.",
"first-login": "Nice to meet you, %(name)s. Your alias is %(alias)s.",
"login": "Hello, %(name)s. It has been %(days)d days since I last heard from you.",
"reminder": "I think you are %(name)s.",
"dont-know": "Please register your phone with RapidSMS.",
"list": "I have %(num)d %(noun)s: %(items)s",
"empty-list": "I don't have any %(noun)s.",
"lang-set": "I will now speak to you in English, where possible.",
"denied": "Sorry, you must identify yourself before you can do that.",
"disabled": "Sorry, but that functionality is disabled." },
# worst german translations _ever_
# just an example. all of this stuff
# should be moved to an i18n app!
"de": {
"bad-alias": "Tut mir leit, ich weiss nicht diesen Namen",
"first-login": "%(name)s hallo! Ich habe nicht gesehen, bevor Sie",
"login": "%(name)s hallo! Ich habe nicht gesehen, Sie sich fur %(days)d Tag",
"reminder": "Sie sind %(name)s.",
"lang-set": "Sie sind Deutsche." }}
HELP = [
("identify", "To identify yourself to RapidSMS, reply: IDENTIFY <alias>")
]
datesep = r"(\.|\/|\\|\-)"
date = r"\d\d?"
month = r"\d\d?"
year = r"\d{2}(\d{2})?"
datepattern = r"^\d\d?[\.|\/|\\|\-]\d\d?[\.|\/|\\|\-]\d{2}(\d{2})?$"
def __str(self, key, reporter=None, lang=None):
# if no language was explicitly requested,
# inherit it from the reporter, or fall
# back to english. because everyone in the
# world speaks english... right?
if lang is None:
if reporter is not None:
lang = reporter.language
# fall back
if lang is None:
lang = "en"
# look for an exact match, in the language
# that the reporter has chosen as preferred
if lang is not None:
if lang in self.MSG:
if key in self.MSG[lang]:
return self.MSG[lang][key]
# not found in localized language. try again in english
# TODO: allow the default to be set in rapidsms.ini
return self.__str(key, lang="en") if lang != "en" else None
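    # Illustrative lookup behavior (comment not in the original): for a
    # reporter whose language is "de", __str("login", rep) returns the German
    # template; a key missing from the "de" table falls back to the English
    # one, and a key missing everywhere returns None.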
def __deny(self, msg):
"""Responds to an incoming message with a localizable
error message to instruct the caller to identify."""
return msg.respond(self.__str("denied", msg.reporter))
# def configure(self, allow_join, allow_list, **kwargs):
# self.allow_join = allow_join
# self.allow_list = allow_list
def handle(self, msg):
matcher = Matcher(msg)
# TODO: this is sort of a lightweight implementation
# of the keyworder. it wasn't supposed to be. maybe
# replace it *with* the keyworder, or extract it
# into a parser of its own
map = {
"registerChild": ["(?:born) (whatever)"],
"registerMother": ["(?:preg) (whatever)"],
"reporterChild": ["(?:crep|mrep) (whatever)"],
"reporterMother": ["(?:crep|mrep) (whatever)"]
}
self.info("Entered mother")
        # if the user is unidentified, don't add pregnancies or births
# search the map for a match, dispatch
# the message to it, and return/stop
for method, patterns in map.items():
if matcher(*patterns) and hasattr(self, method):
getattr(self, method)(msg, *matcher.groups)
return True
# no matches, so this message is not
# for us; allow processing to continue
return False
def parse_person(self,msg,text):
allwords = text.split()
tagsfound = []
person = Person()
        # find all occurrences of the tag codes and save them, or send alerts
for word in allwords:
if len(word)==2 and Tag.objects.filter(code__iexact=word).count():
                tagsfound.append(word)
setattr(person,'tags',tagsfound)
if len(allwords) < 1:
msg.respond("missing national id")
return None
# Determine if the word is a National id
m = re.match(r"^(\d+)$", allwords[0], re.IGNORECASE)
if m is not None:
MatchCode = m.group(0)
setattr(person,'uniqueid', MatchCode)
else:
msg.respond("missing or invalid national id")
return None
if len(allwords) < 2:
msg.respond("missing national id and date")
return None
# Determine if the word is a Date
m = re.match( self.datepattern, allwords[1], re.IGNORECASE)
if m is not None:
MatchCode = m.group(0)
setattr(person,'date', util.get_good_date(MatchCode))
else:
msg.respond("missing or invalid date")
return None
# Determine if the word is weight
m = re.match( r"(\d+(?:\.\d+))(kg|lb)$", allwords[-1], re.IGNORECASE)
if m is not None:
MatchCode = m.group(0)
setattr(person,'weight', MatchCode.replace("kg","").replace("lb",""))
self.info(" weight %s" % MatchCode)
return person
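    # Illustrative message accepted by parse_person (example not in the
    # original):
    #
    #   "12345 12/03/2009 3.2kg"
    #
    # -> national id "12345", date "12/03/2009", weight "3.2". Note the
    # weight pattern (\d+(?:\.\d+))(kg|lb) requires a decimal part, so a
    # token like "3kg" would not match as written.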
def registerChild(self, msg, name):
try:
if msg.reporter is None:
msg.respond("you are not register")
return False
child = self.parse_person(msg,name)
if child is None:
return False
personid = child.uniqueid
DOB = child.date
self.info("Dob %s"% DOB)
weight = child.weight
persontype ,isno = PersonType.objects.get_or_create(singular="Child" , plural="Children")
if personid is not None:
child , dontcare = Child.objects.get_or_create( code=personid,name=personid ,date_of_birth = DOB ,weight=weight, type=persontype)
child.save()
self.info("Success fully added/updated child")
msg.respond("Birth was added successfully")
return True
except:
msg.respond("Sorry, I couldn't add child.")
raise
def registerMother(self, msg, name):
try:
if msg.reporter is None:
msg.respond("you are not register")
return False
person = self.parse_person(msg,name)
if person is None:
return False
# getattr(person,'uniqueid')
personid = person.uniqueid
# getattr(person,"date",date_of_m)
date_of_m = person.date
self.info("Date field %s" % date_of_m)
persontype ,isno = PersonType.objects.get_or_create(singular="Pregnant Woman" , plural="Pregnant Women")
self.info("Personid %s " % personid)
if personid is not None:
pregnant ,dontcare = Pregnant.objects.get_or_create( code=personid , name=personid, gender ="F" ,
date_last_menses = date_of_m ,type=persontype)
pregnant.save()
self.info("Successfully added or updated mother")
msg.respond("pregnancy was added successfully")
return True
except:
msg.respond("Sorry, I couldn't add pregnant woman.")
raise
def identify(self, msg, alias):
try:
            # look up the reporter; if no alias
            # matches, an exception is raised
rep = Reporter.objects.get(alias=alias)
# no such alias, but we can be pretty sure that the message
# was for us, since it matched a pretty specific pattern
# TODO: levenshtein spell-checking from rapidsms/ethiopia
except Reporter.DoesNotExist:
msg.respond(self.__str("bad-alias"))
return True
# before updating the connection, take note
# of the last time that we saw this reporter
ls = rep.last_seen()
# assign the reporter to this message's connection
# (it may currently be assigned to someone else)
msg.persistant_connection.reporter = rep
msg.persistant_connection.save()
msg.reporter = rep
# send a welcome message back to the now-registered reporter,
# depending on how long it's been since their last visit
if ls is not None:
msg.respond(
self.__str("login", rep) % {
"name": unicode(rep),
"days": (datetime.now() - ls).days })
# or a slightly different welcome message
else:
msg.respond(
self.__str("first-login", rep) % {
"name": unicode(rep),
"alias": rep.alias })
# re-call this app's prepare, so other apps can
# get hold of the reporter's info right away
self.parse(msg)
def remind(self, msg):
# if a reporter object was attached to the
# message by self.parse, respond with a reminder
if msg.reporter is not None:
msg.respond(
self.__str("reminder", msg.reporter) % {
"name": unicode(msg.reporter) })
# if not, we have no idea
# who the message was from
else:
msg.respond(self.__str(
"dont-know",
msg.reporter))
def reporters(self, msg):
# abort if listing reporters isn't allowed
# (it can get rather long and expensive)
if not self.allow_join:
msg.respond(self.__str("disabled"))
return True
# not identified yet; reject, so
# we don't allow random people to
# query our reporters list
if msg.reporter is None:
msg.respond(self.__str("denied"))
return True
# collate all reporters, with their full name,
# username, and current connection.
items = [
"%s (%s) %s" % (
rep.full_name(),
rep.alias,
rep.connection().identity)
for rep in Reporter.objects.all()
if rep.connection()]
# respond with the concatenated list.
# no need to check for empty _items_. there will
# always be at least one reporter, because only
# identified reporters can trigger this handler
msg.respond(
self.__str("list", msg.reporter) % {
"items": ", ".join(items),
"noun": "reporters",
"num": len(items) })
def lang(self, msg, code):
        # require identification to continue
# TODO: make this check a decorator, so other apps
# can easily indicate that methods need a valid login
if msg.reporter is not None:
# if the language code was valid, save it
# TODO: obviously, this is not cross-app
if code in self.MSG:
msg.reporter.language = code
msg.reporter.save()
resp = "lang-set"
# invalid language code. don't do
# anything, just send an error message
else: resp = "bad-lang"
# if the caller isn't logged in, send
# an error message, and halt processing
else: resp = "denied"
# always send *some*
# kind of response
msg.respond(
self.__str(
resp, msg.reporter))
|
adammck/rapidsms-community-apps
|
rwanda/app.py
|
Python
|
bsd-3-clause
| 12,980
|
[
"VisIt"
] |
9baafc05032bb559a05284056ec0a5d151d6b9726c1303e8adc0f69a0c57db23
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing statistical distributions.
## This package provides classes for statistical distributions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.distributions.python.ops import gaussian_conjugate_posteriors
from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import *
from tensorflow.contrib.distributions.python.ops.gaussian import *
# from tensorflow.contrib.distributions.python.ops.dirichlet import * # pylint: disable=line-too-long
|
peterbraden/tensorflow
|
tensorflow/contrib/bayesflow/__init__.py
|
Python
|
apache-2.0
| 1,307
|
[
"Gaussian"
] |
dd14c03d3d94a47714ab83af28ec994b49c8d5794d281183438a8f77582d513a
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import numpy as np
from ..convolve import convolve, convolve_fft, convolve_models
from ...modeling import models, fitting
from ...tests.helper import pytest
from ...utils.misc import NumpyRNGContext
from numpy.testing import assert_allclose, assert_almost_equal
try:
import scipy
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestConvolve1DModels(object):
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_is_consistency_with_astropy_convolution(self, mode):
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode)
x = np.arange(-5, 6)
ans = eval("{}(model(x), kernel(x))".format(mode))
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_against_scipy(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode='same')
assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_against_scipy_with_additional_keywords(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode,
normalize_kernel=False)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode='same')
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
def test_sum_of_gaussians(self, mode):
"""
        Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d),
        where N(m, v) denotes a Gaussian probability density function with
        mean m and variance v.
"""
kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1)
model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1)
model_conv = convolve_models(model, kernel, mode=mode,
normalize_kernel=False)
ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2))
x = np.arange(-5, 6)
assert_allclose(ans(x), model_conv(x), atol=1e-3)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
def test_convolve_box_models(self, mode):
kernel = models.Box1D()
model = models.Box1D()
model_conv = convolve_models(model, kernel, mode=mode)
x = np.linspace(-1, 1, 99)
ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0)
assert_allclose(ans, model_conv(x), atol=1e-3)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_convolve_models(self, mode):
"""
test that a convolve model can be fitted
"""
b1 = models.Box1D()
g1 = models.Gaussian1D()
x = np.linspace(-5, 5, 99)
fake_model = models.Gaussian1D(amplitude=10)
with NumpyRNGContext(123):
fake_data = fake_model(x) + np.random.normal(size=len(x))
init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False)
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, fake_data)
me = np.mean(fitted_model(x) - fake_data)
assert_almost_equal(me, 0.0, decimal=2)
|
AustereCuriosity/astropy
|
astropy/convolution/tests/test_convolve_models.py
|
Python
|
bsd-3-clause
| 4,056
|
[
"Gaussian"
] |
b78dd42ea91031bef903408052c864499a37df60511a99712dd09465575ce65b
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: mphoward
R""" MPCD integration methods
Defines bounce-back methods for integrating solutes (MD particles) embedded in an MPCD
solvent. The integration scheme is velocity Verlet (NVE) with bounce-back performed at
the solid boundaries defined by a geometry, as in :py:mod:`.mpcd.stream`. This gives a
simple approximation of the interactions required to keep a solute bounded in a geometry,
and more complex interactions can be specified, for example, by writing custom external fields.
Similar caveats apply to these methods as for the :py:mod:`.mpcd.stream` methods. In particular:
1. The simulation box is periodic, but the geometry imposes inherently non-periodic boundary
conditions. You must ensure that the box is sufficiently large to enclose the geometry
and that all particles lie inside it, or an error will be raised at runtime.
2. You must also ensure that particles do not self-interact through the periodic boundaries.
This is usually achieved for simple pair potentials by padding the box size by the largest
cutoff radius. Failure to do so may result in unphysical interactions.
3. Bounce-back rules do not always enforce no-slip conditions at surfaces properly. It
may still be necessary to add additional 'ghost' MD particles in the surface
to achieve the right boundary conditions and reduce density fluctuations.
The integration methods defined here are not restricted to only MPCD simulations: they can be
used with both ``md.integrate.mode_standard`` and :py:class:`.mpcd.integrator`. For
example, the same integration methods might be used to run DPD simulations with surfaces.
These bounce-back methods do not support anisotropic integration because torques are currently
not computed for collisions with the boundary. Similarly, rigid bodies will also not be treated
correctly because the integrators are not aware of the extent of the particles; the surface
reflections are treated as point particles. An error will be raised if an anisotropic integration
mode is specified.
"""
import hoomd
from hoomd import _hoomd
from . import _mpcd
class _bounce_back():
""" NVE integration with bounce-back rules.
Args:
group (``hoomd.group``): Group of particles on which to apply this method.
:py:class:`_bounce_back` is a base class integration method. It must be used with
``md.integrate.mode_standard`` or :py:class:`.mpcd.integrator`.
Deriving classes implement the specific geometry and valid parameters for those geometries.
Currently, there is no mechanism to share geometries between multiple instances of the same
integration method.
A :py:class:`hoomd.md.compute.ThermodynamicQuantities` is automatically specified and associated with *group*.
"""
def __init__(self, group):
# initialize base class
# hoomd.integrate._integration_method.__init__(self)
# create the compute thermo
hoomd.compute._get_unique_thermo(group=group)
# store metadata
self.group = group
self.boundary = None
self.metadata_fields = ['group', 'boundary']
def _process_boundary(self, bc):
""" Process boundary condition string into enum
Args:
bc (str): Boundary condition, either "no_slip" or "slip"
Returns:
A valid boundary condition enum.
The enum interface is still fairly clunky for the user since the boundary
        condition is buried too deep in the package structure. This is a
        convenience method for interpreting it.
"""
if bc == "no_slip":
return _mpcd.boundary.no_slip
elif bc == "slip":
return _mpcd.boundary.slip
else:
hoomd.context.current.device.cpp_msg.error(
"mpcd.integrate: boundary condition " + bc
+ " not recognized.\n")
raise ValueError("Unrecognized streaming boundary condition")
return None
class slit(_bounce_back):
""" NVE integration with bounce-back rules in a slit channel.
Args:
group (``hoomd.group``): Group of particles on which to apply this method.
H (float): channel half-width
V (float): wall speed (default: 0)
boundary : 'slip' or 'no_slip' boundary condition at wall (default: 'no_slip')
This integration method applies to particles in *group* in the parallel-plate channel geometry.
This method is the MD analog of :py:class:`.stream.slit`, which documents additional details
about the geometry.
Examples::
all = group.all()
slit = mpcd.integrate.slit(group=all, H=5.0)
slit = mpcd.integrate.slit(group=all, H=10.0, V=1.0)
.. versionadded:: 2.7
"""
def __init__(self, group, H, V=0.0, boundary="no_slip"):
# initialize base class
_bounce_back.__init__(self, group)
self.metadata_fields += ['H', 'V']
# initialize the c++ class
if not hoomd.context.current.device.mode == 'gpu':
cpp_class = _mpcd.BounceBackNVESlit
else:
cpp_class = _mpcd.BounceBackNVESlitGPU
self.H = H
self.V = V
self.boundary = boundary
bc = self._process_boundary(boundary)
geom = _mpcd.SlitGeometry(H, V, bc)
self.cpp_method = cpp_class(hoomd.context.current.system_definition,
group.cpp_group, geom)
self.cpp_method.validateGroup()
def set_params(self, H=None, V=None, boundary=None):
""" Set parameters for the slit geometry.
Args:
H (float): channel half-width
V (float): wall speed (default: 0)
boundary : 'slip' or 'no_slip' boundary condition at wall (default: 'no_slip')
Examples::
slit.set_params(H=8.)
slit.set_params(V=2.0)
slit.set_params(boundary='slip')
slit.set_params(H=5, V=0., boundary='no_slip')
"""
if H is not None:
self.H = H
if V is not None:
self.V = V
if boundary is not None:
self.boundary = boundary
bc = self._process_boundary(self.boundary)
self.cpp_method.geometry = _mpcd.SlitGeometry(self.H, self.V, bc)
class slit_pore(_bounce_back):
""" NVE integration with bounce-back rules in a slit pore channel.
Args:
group (``hoomd.group``): Group of particles on which to apply this method.
H (float): channel half-width.
L (float): pore half-length.
boundary : 'slip' or 'no_slip' boundary condition at wall (default: 'no_slip')
This integration method applies to particles in *group* in the parallel-plate (slit) pore geometry.
This method is the MD analog of :py:class:`.stream.slit_pore`, which documents additional details
about the geometry.
Examples::
all = group.all()
slit_pore = mpcd.integrate.slit_pore(group=all, H=10.0, L=10.)
.. versionadded:: 2.7
"""
def __init__(self, group, H, L, boundary="no_slip"):
# initialize base class
_bounce_back.__init__(self, group)
self.metadata_fields += ['H', 'L']
# initialize the c++ class
if not hoomd.context.current.device.mode == 'gpu':
cpp_class = _mpcd.BounceBackNVESlitPore
else:
cpp_class = _mpcd.BounceBackNVESlitPoreGPU
self.H = H
self.L = L
self.boundary = boundary
bc = self._process_boundary(boundary)
geom = _mpcd.SlitPoreGeometry(H, L, bc)
self.cpp_method = cpp_class(hoomd.context.current.system_definition,
group.cpp_group, geom)
self.cpp_method.validateGroup()
def set_params(self, H=None, L=None, boundary=None):
""" Set parameters for the slit pore geometry.
Args:
H (float): channel half-width.
L (float): pore half-length.
boundary : 'slip' or 'no_slip' boundary condition at wall (default: 'no_slip')
Examples::
slit_pore.set_params(H=8.)
slit_pore.set_params(L=2.0)
slit_pore.set_params(boundary='slip')
slit_pore.set_params(H=5, L=4., boundary='no_slip')
"""
if H is not None:
self.H = H
if L is not None:
self.L = L
if boundary is not None:
self.boundary = boundary
bc = self._process_boundary(self.boundary)
self.cpp_method.geometry = _mpcd.SlitPoreGeometry(self.H, self.L, bc)
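# Hedged wiring sketch (not part of the original module): using the slit
# bounce-back method inside an MPCD integrator. The timestep, run length,
# and the surrounding setup (context init, snapshot, pair potentials, MPCD
# solvent) are assumptions of this example.
#
#   all_md = hoomd.group.all()
#   mpcd.integrator(dt=0.005)
#   mpcd.integrate.slit(group=all_md, H=5.0, boundary="no_slip")
#   hoomd.run(1e4)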
|
joaander/hoomd-blue
|
hoomd/mpcd/integrate.py
|
Python
|
bsd-3-clause
| 8,782
|
[
"HOOMD-blue"
] |
15cdca29ef62cd00740683f93ebac627335b7a7fda580b26f2b9e5ae79aa697c
|
#-------------------------------------------------------------------------
# Name: pySaliencyMap
# Purpose: Extracting a saliency map from a single still image
#
# Author: Akisato Kimura <akisato@ieee.org>
#
# Created: April 24, 2014
# Copyright: (c) Akisato Kimura 2014-
# Licence: MIT
# URL: https://github.com/akisato-/pySaliencyMap
#-------------------------------------------------------------------------
import cv2
import numpy as np
from pliers.external.pysaliency import pySaliencyMapDefs
class pySaliencyMap:
# initialization
def __init__(self, width, height):
self.width = width
self.height = height
self.prev_frame = None
self.SM = None
self.GaborKernel0 = np.array(pySaliencyMapDefs.GaborKernel_0)
self.GaborKernel45 = np.array(pySaliencyMapDefs.GaborKernel_45)
self.GaborKernel90 = np.array(pySaliencyMapDefs.GaborKernel_90)
self.GaborKernel135 = np.array(pySaliencyMapDefs.GaborKernel_135)
# extracting color channels
def SMExtractRGBI(self, inputImage):
# convert scale of array elements
src = np.float32(inputImage) * 1./255
# split
(B, G, R) = cv2.split(src)
# extract an intensity image
I = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# return
return R, G, B, I
# feature maps
# constructing a Gaussian pyramid
def FMCreateGaussianPyr(self, src):
dst = list()
dst.append(src)
for i in range(1, 9):
nowdst = cv2.pyrDown(dst[i-1])
dst.append(nowdst)
return dst
# taking center-surround differences
def FMCenterSurroundDiff(self, GaussianMaps):
dst = list()
for s in range(2, 5):
now_size = GaussianMaps[s].shape
now_size = (now_size[1], now_size[0]) # (width, height)
tmp = cv2.resize(
GaussianMaps[s+3], now_size, interpolation=cv2.INTER_LINEAR)
nowdst = cv2.absdiff(GaussianMaps[s], tmp)
dst.append(nowdst)
tmp = cv2.resize(
GaussianMaps[s+4], now_size, interpolation=cv2.INTER_LINEAR)
nowdst = cv2.absdiff(GaussianMaps[s], tmp)
dst.append(nowdst)
return dst
# constructing a Gaussian pyramid + taking center-surround differences
def FMGaussianPyrCSD(self, src):
GaussianMaps = self.FMCreateGaussianPyr(src)
dst = self.FMCenterSurroundDiff(GaussianMaps)
return dst
# intensity feature maps
def IFMGetFM(self, I):
return self.FMGaussianPyrCSD(I)
# color feature maps
def CFMGetFM(self, R, G, B):
# max(R,G,B)
tmp1 = cv2.max(R, G)
RGBMax = cv2.max(B, tmp1)
RGBMax[RGBMax <= 0] = 0.0001 # prevent dividing by 0
# min(R,G)
RGMin = cv2.min(R, G)
# RG = (R-G)/max(R,G,B)
RG = (R - G) / RGBMax
        # BY = (B-min(R,G))/max(R,G,B)
BY = (B - RGMin) / RGBMax
        # clamp negative values to 0
RG[RG < 0] = 0
BY[BY < 0] = 0
# obtain feature maps in the same way as intensity
RGFM = self.FMGaussianPyrCSD(RG)
BYFM = self.FMGaussianPyrCSD(BY)
# return
return RGFM, BYFM
# orientation feature maps
def OFMGetFM(self, src):
# creating a Gaussian pyramid
GaussianI = self.FMCreateGaussianPyr(src)
        # convolving a Gabor filter with an intensity image to extract
        # orientation features
# dummy data: any kinds of np.array()s are OK
GaborOutput0 = [np.empty((1, 1)), np.empty((1, 1))]
GaborOutput45 = [np.empty((1, 1)), np.empty((1, 1))]
GaborOutput90 = [np.empty((1, 1)), np.empty((1, 1))]
GaborOutput135 = [np.empty((1, 1)), np.empty((1, 1))]
for j in range(2, 9):
GaborOutput0.append(
cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel0))
GaborOutput45.append(
cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel45))
GaborOutput90.append(
cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel90))
GaborOutput135.append(
cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel135))
        # calculating center-surround differences for every orientation
CSD0 = self.FMCenterSurroundDiff(GaborOutput0)
CSD45 = self.FMCenterSurroundDiff(GaborOutput45)
CSD90 = self.FMCenterSurroundDiff(GaborOutput90)
CSD135 = self.FMCenterSurroundDiff(GaborOutput135)
# concatenate
dst = list(CSD0)
dst.extend(CSD45)
dst.extend(CSD90)
dst.extend(CSD135)
# return
return dst
# motion feature maps
def MFMGetFM(self, src):
# convert scale
I8U = np.uint8(255 * src)
cv2.waitKey(10)
# calculating optical flows
if self.prev_frame is not None:
farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
farne_levels = pySaliencyMapDefs.farne_levels
farne_winsize = pySaliencyMapDefs.farne_winsize
farne_iterations = pySaliencyMapDefs.farne_iterations
farne_poly_n = pySaliencyMapDefs.farne_poly_n
farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
farne_flags = pySaliencyMapDefs.farne_flags
flow = cv2.calcOpticalFlowFarneback(
prev=self.prev_frame,
next=I8U,
pyr_scale=farne_pyr_scale,
levels=farne_levels,
winsize=farne_winsize,
iterations=farne_iterations,
poly_n=farne_poly_n,
poly_sigma=farne_poly_sigma,
flags=farne_flags,
flow=None
)
flowx = flow[..., 0]
flowy = flow[..., 1]
else:
flowx = np.zeros(I8U.shape)
flowy = np.zeros(I8U.shape)
# create Gaussian pyramids
dst_x = self.FMGaussianPyrCSD(flowx)
dst_y = self.FMGaussianPyrCSD(flowy)
# update the current frame
self.prev_frame = np.uint8(I8U)
# return
return dst_x, dst_y
# conspicuity maps
# standard range normalization
def SMRangeNormalize(self, src):
minn, maxx, dummy1, dummy2 = cv2.minMaxLoc(src)
if maxx != minn:
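            # equivalent to (src - minn) / (maxx - minn): rescale to [0, 1]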
dst = src/(maxx-minn) + minn/(minn-maxx)
else:
dst = src - minn
return dst
# computing an average of local maxima
def SMAvgLocalMax(self, src):
# size
stepsize = pySaliencyMapDefs.default_step_local
width = src.shape[1]
height = src.shape[0]
# find local maxima
numlocal = 0
lmaxmean = 0
for y in range(0, height-stepsize, stepsize):
for x in range(0, width-stepsize, stepsize):
localimg = src[y:y+stepsize, x:x+stepsize]
lmin, lmax, dummy1, dummy2 = cv2.minMaxLoc(localimg)
lmaxmean += lmax
numlocal += 1
# averaging over all the local regions
return lmaxmean / numlocal
# normalization specific for the saliency map model
def SMNormalization(self, src):
dst = self.SMRangeNormalize(src)
lmaxmean = self.SMAvgLocalMax(dst)
normcoeff = (1-lmaxmean)*(1-lmaxmean)
return dst * normcoeff
# normalizing feature maps
def normalizeFeatureMaps(self, FM):
NFM = list()
for i in range(0, 6):
normalizedImage = self.SMNormalization(FM[i])
nownfm = cv2.resize(
normalizedImage, (self.width, self.height), interpolation=cv2.INTER_LINEAR)
NFM.append(nownfm)
return NFM
# intensity conspicuity map
def ICMGetCM(self, IFM):
NIFM = self.normalizeFeatureMaps(IFM)
ICM = sum(NIFM)
return ICM
# color conspicuity map
def CCMGetCM(self, CFM_RG, CFM_BY):
# extracting a conspicuity map for every color opponent pair
CCM_RG = self.ICMGetCM(CFM_RG)
CCM_BY = self.ICMGetCM(CFM_BY)
# merge
CCM = CCM_RG + CCM_BY
# return
return CCM
# orientation conspicuity map
def OCMGetCM(self, OFM):
OCM = np.zeros((self.height, self.width))
for i in range(0, 4):
# slicing
nowofm = OFM[i*6:(i+1)*6] # angle = i*45
# extracting a conspicuity map for every angle
NOFM = self.ICMGetCM(nowofm)
# normalize
NOFM2 = self.SMNormalization(NOFM)
# accumulate
OCM += NOFM2
return OCM
# motion conspicuity map
def MCMGetCM(self, MFM_X, MFM_Y):
return self.CCMGetCM(MFM_X, MFM_Y)
# core
def SMGetSM(self, src):
# definitions
size = src.shape
width = size[1]
height = size[0]
# check
# if(width != self.width or height != self.height):
# sys.exit("size mismatch")
# extracting individual color channels
R, G, B, I = self.SMExtractRGBI(src)
# extracting feature maps
IFM = self.IFMGetFM(I)
CFM_RG, CFM_BY = self.CFMGetFM(R, G, B)
OFM = self.OFMGetFM(I)
MFM_X, MFM_Y = self.MFMGetFM(I)
# extracting conspicuity maps
ICM = self.ICMGetCM(IFM)
CCM = self.CCMGetCM(CFM_RG, CFM_BY)
OCM = self.OCMGetCM(OFM)
MCM = self.MCMGetCM(MFM_X, MFM_Y)
# adding all the conspicuity maps to form a saliency map
wi = pySaliencyMapDefs.weight_intensity
wc = pySaliencyMapDefs.weight_color
wo = pySaliencyMapDefs.weight_orientation
wm = pySaliencyMapDefs.weight_motion
SMMat = wi*ICM + wc*CCM + wo*OCM + wm*MCM
# normalize
normalizedSM = self.SMRangeNormalize(SMMat)
normalizedSM2 = normalizedSM.astype(np.float32)
smoothedSM = cv2.bilateralFilter(normalizedSM2, 7, 3, 1.55)
self.SM = cv2.resize(
smoothedSM, (width, height), interpolation=cv2.INTER_NEAREST)
# return
return self.SM
def SMGetBinarizedSM(self, src):
# get a saliency map
if self.SM is None:
self.SM = self.SMGetSM(src)
# convert scale
SM_I8U = np.uint8(255 * self.SM)
# binarize
thresh, binarized_SM = cv2.threshold(
SM_I8U, thresh=0, maxval=255, type=cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return binarized_SM
def SMGetSalientRegion(self, src):
# get a binarized saliency map
binarized_SM = self.SMGetBinarizedSM(src)
# GrabCut
img = src.copy()
mask = np.where(
(binarized_SM != 0), cv2.GC_PR_FGD, cv2.GC_PR_BGD).astype('uint8')
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
rect = (0, 0, 1, 1) # dummy
iterCount = 1
cv2.grabCut(img, mask=mask, rect=rect, bgdModel=bgdmodel,
fgdModel=fgdmodel, iterCount=iterCount, mode=cv2.GC_INIT_WITH_MASK)
# post-processing
mask_out = np.where(
(mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')
output = cv2.bitwise_and(img, img, mask=mask_out)
return output
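# Hedged usage sketch (not part of the original module): computing a saliency
# map for a single BGR frame loaded with OpenCV. The file name is
# illustrative.
#
#   img = cv2.imread("frame.png")                   # BGR, uint8
#   sm = pySaliencyMap(img.shape[1], img.shape[0])  # width, height
#   saliency = sm.SMGetSM(img)                      # range-normalized float map
#   region = sm.SMGetSalientRegion(img)             # GrabCut-masked image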
|
tyarkoni/featureX
|
pliers/external/pysaliency/pySaliencyMap.py
|
Python
|
bsd-3-clause
| 11,365
|
[
"Gaussian"
] |
cad0bbfa540f4467d2416854b5d99b6e89bdbf609a7e1d02de56d97a028cb173
|
"""
Tests of inference methods.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
import numpy.testing as nt
import scipy.optimize as spop
# local imports
import pygp
### BASE TEST CLASS ###########################################################
class InferenceTest(object):
def test_repr(self):
_ = repr(self.gp)
def test_params(self):
_ = self.gp._params()
def test_data(self):
_ = self.gp.data
def test_copy(self):
_ = self.gp.copy()
def test_prior(self):
gp = self.gp.copy()
gp.reset()
_ = gp.posterior(self.X, grad=True)
_ = gp.sample(self.X)
def test_from(self):
# make sure we can call from_gp on the same class.
_ = self.gp.__class__.from_gp(self.gp)
_ = pygp.inference.ExactGP.from_gp(self.gp)
# reinterpret as exact inference and reset the data.
gp = pygp.inference.ExactGP.from_gp(self.gp)
gp.reset()
# make sure we can move from an ExactGP to the current class.
if hasattr(self.gp, 'pseudoinputs'):
_ = self.gp.__class__.from_gp(gp, self.gp.pseudoinputs)
nt.assert_raises(ValueError, self.gp.__class__.from_gp, gp)
else:
_ = self.gp.__class__.from_gp(gp)
def test_hyper(self):
hyper1 = self.gp.get_hyper()
self.gp.set_hyper(self.gp.get_hyper())
hyper2 = self.gp.get_hyper()
nt.assert_allclose(hyper1, hyper2)
def test_add_data(self):
# add additional data.
gp1 = self.gp.copy()
gp1.add_data(self.X, self.y)
# add additional data but make sure we don't do so incrementally.
updateinc = pygp.inference._base.GP._updateinc
gp2 = self.gp.copy()
gp2._updateinc = lambda X, y: updateinc(gp2, X, y)
gp2.add_data(self.X, self.y)
# make sure the posteriors match.
p1 = gp1.posterior(self.X)
p2 = gp2.posterior(self.X)
nt.assert_allclose(p1, p2)
def test_sample(self):
_ = self.gp.sample(self.X, m=2, latent=False)
_ = self.gp.sample(self.X, m=2, latent=True)
def test_sample_fourier(self):
# sample a function
f = self.gp.sample_fourier(10)
x = self.X[0]
# get the gradient and test it
_, g1 = f(x, True)
g2 = spop.approx_fprime(x, f, 1e-8)
nt.assert_allclose(g1, g2, rtol=1e-5, atol=1e-5)
# reset the gp and sample from the prior.
gp = self.gp.copy()
gp.reset()
f = gp.sample_fourier(10)
# get the gradient and test it
_, g1 = f(x, True)
g2 = spop.approx_fprime(x, f, 1e-8)
nt.assert_allclose(g1, g2, rtol=1e-5, atol=1e-5)
def test_loglikelihood(self):
x = self.gp.get_hyper()
f = lambda x: self.gp.copy(x).loglikelihood()
_, g1 = self.gp.loglikelihood(grad=True)
g2 = spop.approx_fprime(x, f, 1e-8)
        # slightly looser gradient tolerance, mostly due to FITC.
nt.assert_allclose(g1, g2, rtol=1e-5, atol=1e-5)
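        # Expository note (not in the original): this is a standard
        # finite-difference gradient check -- spop.approx_fprime perturbs
        # each hyperparameter by 1e-8 and the numeric slope is compared
        # against the analytic gradient from loglikelihood(grad=True).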
### TEST CLASS FOR REAL-VALUED INPUTS #########################################
class RealTest(InferenceTest):
def __init__(self, gp):
# create some data.
rng = np.random.RandomState(1)
X = rng.rand(10, gp._kernel.ndim)
y = gp._likelihood.sample(rng.rand(10), rng)
# create a gp.
self.gp = gp
self.gp.add_data(X, y)
# new set of points to predict at.
self.X = rng.rand(10, gp._kernel.ndim)
self.y = gp._likelihood.sample(rng.rand(10), rng)
def test_reset(self):
gp = self.gp.copy()
gp.reset()
# test that we can get the prior predictions.
gp.posterior(self.X)
# test that adding the data gets the same thing.
gp.add_data(*self.gp.data)
mu1, va1 = gp.posterior(self.X)
mu2, va2 = self.gp.posterior(self.X)
nt.assert_allclose(mu1, mu2, rtol=1e-6, atol=1e-6)
nt.assert_allclose(va1, va2, rtol=1e-6, atol=1e-6)
def test_hyper(self):
# set the hyperparameters with the given data.
gp = self.gp.copy()
gp.set_hyper(gp.get_hyper() + 1)
gp.posterior(self.X)
# set the hyperparameters after a reset.
gp = self.gp.copy()
gp.reset()
gp.set_hyper(gp.get_hyper() + 1)
gp.posterior(self.X)
def test_posterior_mu(self):
f = lambda x: self.gp.posterior(x[None])[0]
G1 = self.gp.posterior(self.X, grad=True)[2]
G2 = np.array([spop.approx_fprime(x, f, 1e-8) for x in self.X])
nt.assert_allclose(G1, G2, rtol=1e-6, atol=1e-6)
def test_posterior_s2(self):
f = lambda x: self.gp.posterior(x[None])[1]
G1 = self.gp.posterior(self.X, grad=True)[3]
G2 = np.array([spop.approx_fprime(x, f, 1e-8) for x in self.X])
nt.assert_allclose(G1, G2, rtol=1e-5, atol=1e-5)
### PER INFERENCE METHOD TESTS ################################################
class TestExact(RealTest):
def __init__(self):
likelihood = pygp.likelihoods.Gaussian(1)
kernel = pygp.kernels.SE(1, 1, ndim=2)
gp = pygp.inference.ExactGP(likelihood, kernel, 0.0)
RealTest.__init__(self, gp)
class TestBasic(RealTest):
def __init__(self):
gp = pygp.inference.BasicGP(1, 1, 1, 0, ndim=2)
RealTest.__init__(self, gp)
class TestFITC(RealTest):
def __init__(self):
rng = np.random.RandomState(1)
likelihood = pygp.likelihoods.Gaussian(1)
kernel = pygp.kernels.SE(1, 1, ndim=2)
mean = 0.0
U = rng.rand(10, kernel.ndim)
gp = pygp.inference.FITC(likelihood, kernel, mean, U)
RealTest.__init__(self, gp)
class TestDTC(RealTest):
def __init__(self):
rng = np.random.RandomState(1)
likelihood = pygp.likelihoods.Gaussian(1)
kernel = pygp.kernels.SE(1, 1, ndim=2)
mean = 0.0
U = rng.rand(10, kernel.ndim)
gp = pygp.inference.DTC(likelihood, kernel, mean, U)
RealTest.__init__(self, gp)
### INITIALIZATION TESTS ######################################################
# the following tests attempt to initialize a few models with invalid
# parameters, each of which should raise an exception.
def test_init_basic():
# make sure we can initialize correctly.
_ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'se'))
_ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern1'))
_ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern3'))
_ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern5'))
# throw an error with an unknown kernel.
nt.assert_raises(ValueError,
pygp.inference.BasicGP, 1, 1, 1, 0, 2, 'foo')
# throw an error for from_gp with incorrect kernel.
likelihood = pygp.likelihoods.Gaussian(1)
kernel = pygp.kernels.Periodic(1, 1, 1)
gp = pygp.inference.ExactGP(likelihood, kernel, 0)
nt.assert_raises(ValueError, pygp.BasicGP.from_gp, gp)
|
mwhoffman/pygp
|
tests/test_inference.py
|
Python
|
bsd-2-clause
| 7,224
|
[
"Gaussian"
] |
99bee3bff0da7d6cd335a7a3b7ceefaa36da954a8ebb90bedef3e0c3a83ea31f
|
""" This module loads all the classes from the VTK Imaging library into
its namespace. This is a required module."""
from vtkImagingPython import *
|
b3c/VTK-5.8
|
Wrapping/Python/vtk/imaging.py
|
Python
|
bsd-3-clause
| 150
|
[
"VTK"
] |
41920ccd13f2e488c2cbb5f06efc85e23f09f614f48a0a03b5b872fd7826e9fe
|
#!/usr/bin/env python
"""
hawaii.py
State Estimation and Analysis for PYthon
Utilities for dealing with data around Hawaii
Examples
--------
Assume you have longitude, latitude, and sst values:
>>> m=seapy.hawaii()
>>> m.pcolor(lon,lat,sst,vmin=22,vmax=26,cmap=plt.cm.bwr)
>>> m.land()
>>> m.colorbar(label="Sea Surface Temp [$^\circ$C]",cticks=[22,23,24,25,26])
>>> m.ax.patch.set_facecolor("aqua")
>>> m.ax.patch.set_alpha(1)
>>> m.fig.patch.set_alpha(0.0)
>>> m.fig.savefig("sst.png",dpi=100)
Written by Brian Powell on 9/4/14
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
from .mapping import map
from matplotlib.patches import Polygon
from matplotlib.collections import PolyCollection
import os
_shape_file = os.path.dirname(__file__) + "/hawaii_coast/hawaii"
class hawaii(map):
def __init__(self, grid=None, llcrnrlon=-163, llcrnrlat=17, urcrnrlon=-153,
urcrnrlat=24, figsize=(8., 6.), dlat=1, dlon=2, fig=None, ax=None,
fill_color="aqua"):
super().__init__(grid=grid, llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
figsize=figsize, dlat=dlat, dlon=dlon, fig=fig, ax=ax,
fill_color=fill_color)
def land(self, color="black"):
"""
        Use the GIS coastline data from the state of Hawaii to draw the
land boundaries. This does not include rivers, etc., only the
coastline.
Parameters
----------
color: string, optional
Color to draw the land mask with
Returns
-------
None
"""
        if not hasattr(self.basemap, "coast") or not hasattr(self, "landpoly"):
self.basemap.readshapefile(_shape_file, "coast")
vert = []
for shape in self.basemap.coast:
vert.append(shape)
self.landpoly = PolyCollection(
vert, facecolors=color, edgecolors=color)
# Draw the loaded shapes
self.ax.add_collection(self.landpoly)
|
dalepartridge/seapy
|
hawaii.py
|
Python
|
mit
| 2,156
|
[
"Brian"
] |
5a2ee97095a6c6e8769c091aeb84076386126a79eb7961018a70fa07adeef02c
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import argparse
import antlr4
from conversion import *
from conversionLexer import *
from conversionVisitor import *
def add_indent(code, level):
retval = ''
for line in code:
if isinstance(line, list):
retval += add_indent(line, level+1)
else:
retval += ' '*level + line + '\n'
return retval
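# Illustrative example (added comment, not in the original source):
#   add_indent(['a', ['b']], 0) yields 'a' on one line and 'b' on the next,
#   indented one level deeper.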
def docstring(string):
return "'''\n{}'''".format(string.replace('\\', '\\\\').replace('\'', '\\\''))
class TestsGenVisitor(conversionVisitor):
'''Visitor that extracts conversion sections'''
def visitConversionTests(self, ctx):
t = '''
! from __future__ import print_function
!
! import difflib
!
! import vfp2py
'''.split('! ')[1:]
for i, test in enumerate(ctx.conversionTest()):
foxlines, pylines = self.visit(test)
test_func = '''
!
!
! def Test{}():
! input_str = {}.strip()
! output_str = {}.strip()
! test_output_str = {}.strip()
! try:
! assert test_output_str == output_str
! except AssertionError:
! diff = difflib.unified_diff((test_output_str + '\\n').splitlines(1), (output_str + '\\n').splitlines(1))
! print(''.join(diff))
! raise
'''
special_directive = str(test.FoxStart().symbol.text[13:].strip())
if special_directive:
test_output = 'vfp2py.vfp2py.prg2py(input_str, \'cp1252\', parser_start={}, prepend_data=\'\')'.format(repr(special_directive))
else:
test_output = 'vfp2py.vfp2py.prg2py(input_str, \'cp1252\')'
test_func = test_func.format(i, docstring(foxlines), docstring(pylines), test_output)
t += test_func.split('! ')[1:]
t = [l.rstrip() for l in t]
return add_indent(t, 0)
def visitConversionTest(self, ctx):
foxlines = ''.join(tok.symbol.text for tok in ctx.FoxLine())
pylines = ''.join(tok.symbol.text for tok in ctx.PyLine())
return foxlines, pylines
def generate_tests(filename):
with open(filename, 'rb') as fid:
file_contents = fid.read().decode('utf-8')
input_stream = antlr4.InputStream(file_contents)
lexer = conversionLexer(input_stream)
stream = antlr4.CommonTokenStream(lexer)
parser = conversion(stream)
tree = parser.conversionTests()
visitor = TestsGenVisitor()
return visitor.visit(tree)
def parse_args(argv=None):
parser = argparse.ArgumentParser(description='Tool for generating vfp to python conversion tests from conversion file')
parser.add_argument("infile", help="file of conversions", type=str)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
print(generate_tests(args.infile))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
mwisslead/vfp2py
|
testbed/__main__.py
|
Python
|
mit
| 3,092
|
[
"VisIt"
] |
d7050176410c43d9c995656ff5fc321a9476edf74b9d17fceaae15a8cc120fdc
|
import os
import external.cclib as cclib
import logging
from subprocess import Popen, PIPE
import re
from rmgpy.molecule import Molecule
from qmdata import CCLibData
from molecule import QMMolecule
class Gaussian:
"""
A base class for all QM calculations that use Gaussian.
Classes such as :class:`GaussianMol` will inherit from this class.
"""
inputFileExtension = '.gjf'
outputFileExtension = '.log'
gaussEnv = os.getenv('GAUSS_EXEDIR') or os.getenv('g09root') or os.getenv('g03root') or ""
if os.path.exists(os.path.join(gaussEnv , 'g09')):
executablePath = os.path.join(gaussEnv , 'g09')
elif os.path.exists(os.path.join(gaussEnv , 'g03')):
executablePath = os.path.join(gaussEnv , 'g03')
else:
executablePath = os.path.join(gaussEnv , '(g03 or g09)')
usePolar = False
#: List of phrases that indicate failure
    #: NONE of these may be present in a successful job.
failureKeys = [
'ERROR TERMINATION',
'IMAGINARY FREQUENCIES'
]
#: List of phrases to indicate success.
#: ALL of these must be present in a successful job.
successKeys = [
'Normal termination of Gaussian'
]
def testReady(self):
if not os.path.exists(self.executablePath):
raise Exception("Couldn't find Gaussian executable at {0}. Try setting your GAUSS_EXEDIR environment variable.".format(self.executablePath))
def run(self):
self.testReady()
# submits the input file to Gaussian
process = Popen([self.executablePath, self.inputFilePath, self.outputFilePath])
process.communicate()# necessary to wait for executable termination!
return self.verifyOutputFile()
def verifyOutputFile(self):
"""
        Checks that an output file exists and was successful.
Returns a boolean flag that states whether a successful GAUSSIAN simulation already exists for the molecule with the
given (augmented) InChI Key.
The definition of finding a successful simulation is based on these criteria:
1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that denote a calculation failure
        3) finding all the keywords that denote a calculation success.
        4) finding a match between the InChI of the given molecule and the InChI found in the calculation files
5) checking that the optimized geometry, when connected by single bonds, is isomorphic with self.molecule (converted to single bonds)
If any of the above criteria is not matched, False will be returned.
If all are satisfied, it will return True.
"""
if not os.path.exists(self.outputFilePath):
logging.info("Output file {0} does not exist.".format(self.outputFilePath))
return False
        InChIMatch=False #flag (1 or 0) indicating whether the InChI in the file matches InChIaug; this can only be 1 if InChIFound is also 1
InChIFound=False #flag (1 or 0) indicating whether an InChI was found in the log file
# Initialize dictionary with "False"s
successKeysFound = dict([(key, False) for key in self.successKeys])
with open(self.outputFilePath) as outputFile:
for line in outputFile:
line = line.strip()
for element in self.failureKeys: #search for failure keywords
if element in line:
logging.error("Gaussian output file contains the following error: {0}".format(element) )
return False
for element in self.successKeys: #search for success keywords
if element in line:
successKeysFound[element] = True
if line.startswith("InChI="):
logFileInChI = line #output files should take up to 240 characters of the name in the input file
InChIFound = True
if logFileInChI == self.geometry.uniqueIDlong:
InChIMatch = True
elif self.geometry.uniqueIDlong.startswith(logFileInChI):
logging.info("InChI too long to check, but beginning matches so assuming OK.")
InChIMatch = True
else:
logging.warning("InChI in log file ({0}) didn't match that in geometry ({1}).".format(logFileInChI, self.geometry.uniqueIDlong))
if self.geometry.uniqueIDlong.startswith(logFileInChI):
logging.warning("but the beginning matches so it's probably just a truncation problem.")
InChIMatch = True
# Check that ALL 'success' keywords were found in the file.
if not all( successKeysFound.values() ):
logging.error('Not all of the required keywords for success were found in the output file!')
return False
if not InChIFound:
logging.error("No InChI was found in the Gaussian output file {0}".format(self.outputFilePath))
return False
if not InChIMatch:
#InChIs do not match (most likely due to limited name length mirrored in log file (240 characters), but possibly due to a collision)
return self.checkForInChiKeyCollision(logFileInChI) # Not yet implemented!
# Compare the optimized geometry to the original molecule
qmData = self.parse()
cclibMol = Molecule()
cclibMol.fromXYZ(qmData.atomicNumbers, qmData.atomCoords.value)
testMol = self.molecule.toSingleBonds()
if not cclibMol.isIsomorphic(testMol):
logging.info("Incorrect connectivity for optimized geometry in file {0}".format(self.outputFilePath))
return False
logging.info("Successful MOPAC quantum result found in {0}".format(self.outputFilePath))
return True
def parse(self):
"""
Parses the results of the Gaussian calculation, and returns a CCLibData object.
"""
parser = cclib.parser.Gaussian(self.outputFilePath)
parser.logger.setLevel(logging.ERROR) #cf. http://cclib.sourceforge.net/wiki/index.php/Using_cclib#Additional_information
cclibData = parser.parse()
radicalNumber = sum([i.radicalElectrons for i in self.molecule.atoms])
qmData = CCLibData(cclibData, radicalNumber+1)
return qmData
class GaussianMol(QMMolecule, Gaussian):
"""
A base Class for calculations of molecules using Gaussian.
Inherits from both :class:`QMMolecule` and :class:`Gaussian`.
"""
def inputFileKeywords(self, attempt):
"""
Return the top keywords for attempt number `attempt`.
NB. `attempt`s begin at 1, not 0.
"""
assert attempt <= self.maxAttempts
if attempt > self.scriptAttempts:
attempt -= self.scriptAttempts
return self.keywords[attempt-1]
def writeInputFile(self, attempt):
"""
Using the :class:`Geometry` object, write the input file
        for the `attempt`th attempt.
"""
molfile = self.getMolFilePathForCalculation(attempt)
atomline = re.compile('\s*([\- ][0-9.]+\s+[\-0-9.]+\s+[\-0-9.]+)\s+([A-Za-z]+)')
output = ['', self.geometry.uniqueIDlong, '' ]
output.append("{charge} {mult}".format(charge=0, mult=(self.molecule.getRadicalCount() + 1) ))
atomCount = 0
with open(molfile) as molinput:
for line in molinput:
match = atomline.match(line)
if match:
output.append("{0:8s} {1}".format(match.group(2), match.group(1)))
atomCount += 1
assert atomCount == len(self.molecule.atoms)
output.append('')
input_string = '\n'.join(output)
top_keys = self.inputFileKeywords(attempt)
with open(self.inputFilePath, 'w') as gaussianFile:
gaussianFile.write(top_keys)
gaussianFile.write('\n')
gaussianFile.write(input_string)
gaussianFile.write('\n')
if self.usePolar:
gaussianFile.write('\n\n\n')
raise NotImplementedError("Not sure what should be here, if anything.")
#gaussianFile.write(polar_keys)
def generateQMData(self):
"""
Calculate the QM data and return a QMData object.
"""
self.createGeometry()
if self.verifyOutputFile():
logging.info("Found a successful output file already; using that.")
else:
success = False
for attempt in range(1, self.maxAttempts+1):
self.writeInputFile(attempt)
success = self.run()
if success:
logging.info('Attempt {0} of {1} on species {2} succeeded.'.format(attempt, self.maxAttempts, self.molecule.toAugmentedInChI()))
break
else:
logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.toAugmentedInChI()))
return None
result = self.parse() # parsed in cclib
return result
class GaussianMolPM3(GaussianMol):
"""
Gaussian PM3 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's only the 'pm3' in the keywords that differs.
"""
#: Keywords that will be added at the top of the qm input file
keywords = [
# The combinations of keywords were derived by Greg Magoon for pm3 in Gaussian. His comments are attached to each combination.
"# pm3 opt=(verytight,gdiis) freq IOP(2/16=3)", # added IOP option to avoid aborting when symmetry changes; 3 is supposed to be default according to documentation, but it seems that 0 (the default) is the only option that doesn't work from 0-4; also, it is interesting to note that all 4 options seem to work for test case with z-matrix input rather than xyz coords; cf. http://www.ccl.net/cgi-bin/ccl/message-new?2006+10+17+005 for original idea for solution
"# pm3 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)", # use different SCF method; this addresses at least one case of failure for a C4H7J species
"# pm3 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm" , # try multiple different options (no gdiis, use calcfc, nosymm); 7/21/09: added maxcyc option to fix case of MPTBUKVAJYJXDE-UHFFFAOYAPmult3 (InChI=1/C4H10O5Si/c1-3-7-9-10(5,6)8-4-2/h4-5H,3H2,1-2H3/mult3) (file manually copied to speed things along)
"# pm3 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm", # numerical frequency keyword version of keyword #3; used to address GYFVJYRUZAKGFA-UHFFFAOYALmult3 (InChI=1/C6H14O6Si/c1-3-10-13(8,11-4-2)12-6-5-9-7/h6-7H,3-5H2,1-2H3/mult3) case; (none of the existing Gaussian or MOPAC combinations worked with it)
"# pm3 opt=(verytight,gdiis,small) freq IOP(2/16=3)", # somehow, this worked for problematic case of ZGAWAHRALACNPM-UHFFFAOYAF (InChI=1/C8H17O5Si/c1-3-11-14(10,12-4-2)13-8-5-7(9)6-8/h7-9H,3-6H2,1-2H3); (was otherwise giving l402 errors); even though I had a keyword that worked for this case, I manually copied the fixed log file to QMfiles folder to speed things along; note that there are a couple of very low frequencies (~5-6 cm^-1 for this case)
"# pm3 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)", # used for troublesome C5H7J2 case (similar error to C5H7J below); calcfc is not necessary for this particular species, but it speeds convergence and probably makes it more robust for other species
"# pm3 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)", # use numerical frequencies; this takes a relatively long time, so should only be used as one of the last resorts; this seemed to address at least one case of failure for a C6H10JJ species; 7/15/09: maxcyc=200 added to address GVCMURUDAUQXEY-UHFFFAOYAVmult3 (InChI=1/C3H4O7Si/c1-2(9-6)10-11(7,8)3(4)5/h6-7H,1H2/mult3)...however, result was manually pasted in QMfiles folder to speed things along
"# pm3 opt=tight freq IOP(2/16=3)", # this worked for problematic case of SZSSHFMXPBKYPR-UHFFFAOYAF (InChI=1/C7H15O5Si/c1-3-10-13(8,11-4-2)12-7-5-6-9-7/h7H,3-6H2,1-2H3) (otherwise, it had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along; we could also add a freq=numerical version of this keyword combination for added robustness; UPDATE: see below
"# pm3 opt=tight freq=numerical IOP(2/16=3)", # used for problematic case of CIKDVMUGTARZCK-UHFFFAOYAImult4 (InChI=1/C8H15O6Si/c1-4-12-15(10,13-5-2)14-7-6-11-8(7,3)9/h7H,3-6H2,1-2H3/mult4 (most other cases had l402.exe errors); corrected log file was manually copied to QMfiles to speed things along
"# pm3 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)", # similar to existing #5, but uses tight rather than verytight; used for ADMPQLGIEMRGAT-UHFFFAOYAUmult3 (InChI=1/C6H14O5Si/c1-4-9-12(8,10-5-2)11-6(3)7/h6-7H,3-5H2,1-2H3/mult3)
"# pm3 opt freq IOP(2/16=3)", # use default (not verytight) convergence criteria; use this as last resort
"# pm3 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)", # to address problematic C10H14JJ case
"# pm3 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm", # for very troublesome RRMZRNPRCUANER-UHFFFAOYAQ (InChI=1/C5H7/c1-3-5-4-2/h3H,1-2H3) case...there were troubles with negative frequencies, where I don't think they should have been; step size of numerical frequency was adjusted to give positive result; accuracy of result is questionable; it is possible that not all of these keywords are needed; note that for this and other nearly free rotor cases, I think heat capacity will be overestimated by R/2 (R vs. R/2) (but this is a separate issue)
"# pm3 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm", # for troublesome QDERTVAGQZYPHT-UHFFFAOYAHmult3(InChI=1/C6H14O4Si/c1-4-8-11(7,9-5-2)10-6-3/h4H,5-6H2,1-3H3/mult3); key aspects appear to be tight (rather than verytight) convergence criteria, no calculation of frequencies during optimization, use of numerical frequencies, and probably also the use of opt=small
"# pm3 opt=(verytight,gdiis,calcall) IOP(2/16=3)", # used for troublesome C5H7J case; note that before fixing, I got errors like the following: "Incomplete coordinate system. Try restarting with Geom=Check Guess=Read Opt=(ReadFC,NewRedundant) Incomplete coordinate system. Error termination via Lnk1e in l103.exe"; we could try to restart, but it is probably preferrable to have each keyword combination standalone; another keyword that may be helpful if additional problematic cases are encountered is opt=small; 6/9/09 note: originally, this had # pm3 opt=(verytight,gdiis,calcall) freq IOP(2/16=3)" (with freq keyword), but I discovered that in this case, there are two thermochemistry sections and cclib parses frequencies twice, giving twice the number of desired frequencies and hence produces incorrect thermo; this turned up on C5H6JJ isomer
"# pm3 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm", # worked for troublesome ketene case: CCGKOQOJPYTBIH-UHFFFAOYAO (InChI=1/C2H2O/c1-2-3/h1H2) (could just increase number of iterations for similar keyword combination above (#6 at the time of this writing), allowing symmetry, but nosymm seemed to reduce # of iterations; I think one of nosymm or higher number of iterations would allow the similar keyword combination to converge; both are included here for robustness)
"# pm3 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm", # added for case of ZWMVZWMBTVHPBS-UHFFFAOYAEmult3 (InChI=1/C4H4O2/c1-3-5-6-4-2/h1-2H2/mult3)
"# pm3 opt=(calcall,small,maxcyc=100) IOP(2/16=3)", # used to address troublesome FILUFGAZMJGNEN-UHFFFAOYAImult3 case (InChI=1/C5H6/c1-3-5-4-2/h3H,1H2,2H3/mult3)
]
class GaussianMolPM6(GaussianMol):
"""
Gaussian PM6 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's only the 'pm6' in the keywords that differs.
"""
#: Keywords that will be added at the top of the qm input file
keywords = [
# The combinations of keywords were derived by Greg Magoon for pm3. For now, we assume similar ones will work for pm6:
"# pm6 opt=(verytight,gdiis) freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis) freq IOP(2/16=3) IOP(4/21=2)",
"# pm6 opt=(verytight,calcfc,maxcyc=200) freq IOP(2/16=3) nosymm" ,
"# pm6 opt=(verytight,calcfc,maxcyc=200) freq=numerical IOP(2/16=3) nosymm",
"# pm6 opt=(verytight,gdiis,small) freq IOP(2/16=3)",
"# pm6 opt=(verytight,nolinear,calcfc,small) freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis,maxcyc=200) freq=numerical IOP(2/16=3)",
"# pm6 opt=tight freq IOP(2/16=3)",
"# pm6 opt=tight freq=numerical IOP(2/16=3)",
"# pm6 opt=(tight,nolinear,calcfc,small,maxcyc=200) freq IOP(2/16=3)",
"# pm6 opt freq IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis) freq=numerical IOP(2/16=3) IOP(4/21=200)",
"# pm6 opt=(calcfc,verytight,newton,notrustupdate,small,maxcyc=100,maxstep=100) freq=(numerical,step=10) IOP(2/16=3) nosymm",
"# pm6 opt=(tight,gdiis,small,maxcyc=200,maxstep=100) freq=numerical IOP(2/16=3) nosymm",
"# pm6 opt=(verytight,gdiis,calcall) IOP(2/16=3)",
"# pm6 opt=(verytight,gdiis,calcall,small,maxcyc=200) IOP(2/16=3) IOP(4/21=2) nosymm",
"# pm6 opt=(verytight,gdiis,calcall,small) IOP(2/16=3) nosymm",
"# pm6 opt=(calcall,small,maxcyc=100) IOP(2/16=3)",
]
|
KEHANG/RMG-Py
|
rmgpy/qm/gaussian.py
|
Python
|
mit
| 18,505
|
[
"Gaussian",
"MOPAC",
"cclib"
] |
e18f1896394fe65b31c33402a6e885e9681ae5e579b2c193131c83557c7ed5ab
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
##########NOTE: Does not conform to new interfaces yet #################
from wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.util import openURL, testXMLValue
from urllib import urlencode
from urllib2 import urlopen
from owslib.etree import etree
import os, errno
from owslib.coverage import wcsdecoder
from owslib.crs import Crs
import logging
from owslib.util import log
def ns(tag):
return '{http://www.opengis.net/wcs/1.1}'+tag
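# Illustrative example (added comment, not in the original source):
#   ns('Identifier') -> '{http://www.opengis.net/wcs/1.1}Identifier'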
class WebCoverageService_1_1_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 1.1.0
Implements IWebCoverageService.
"""
def __getitem__(self, name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self,url,xml, cookies):
self.version='1.1.0'
self.url = url
self.cookies=cookies
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
# check for exceptions
se = self._capabilities.find('{http://www.opengis.net/ows/1.1}Exception')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
#build metadata objects:
#serviceIdentification metadata
elem=self._capabilities.find('{http://www.opengis.net/wcs/1.1/ows}ServiceIdentification')
if elem is None:
elem=self._capabilities.find('{http://www.opengis.net/ows/1.1}ServiceIdentification')
self.identification=ServiceIdentification(elem)
#serviceProvider
elem=self._capabilities.find('{http://www.opengis.net/ows/1.1}ServiceProvider')
self.provider=ServiceProvider(elem)
#serviceOperations
self.operations = []
for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1/ows}OperationsMetadata/{http://www.opengis.net/wcs/1.1/ows}Operation/'):
self.operations.append(Operation(elem))
# exceptions - ***********TO DO *************
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
# serviceContents: our assumption is that services use a top-level layer
# as a metadata organizer, nothing more.
self.contents = {}
top = self._capabilities.find('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary')
for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary/{http://www.opengis.net/wcs/1.1}CoverageSummary'):
cm=ContentMetadata(elem, top, self)
self.contents[cm.id]=cm
if self.contents=={}:
#non-hierarchical.
top=None
for elem in self._capabilities.findall('{http://www.opengis.net/wcs/1.1}Contents/{http://www.opengis.net/wcs/1.1}CoverageSummary'):
cm=ContentMetadata(elem, top, self)
#make the describeCoverage requests to populate the supported formats/crs attributes
self.contents[cm.id]=cm
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
#TO DECIDE: Offer repackaging of coverageXML/Multipart MIME output?
#def getData(self, directory='outputdir', outputfile='coverage.nc', **kwargs):
#u=self.getCoverageRequest(**kwargs)
##create the directory if it doesn't exist:
#try:
#os.mkdir(directory)
#except OSError, e:
## Ignore directory exists error
#if e.errno <> errno.EEXIST:
#raise
##elif wcs.version=='1.1.0':
##Could be multipart mime or XML Coverages document, need to use the decoder...
#decoder=wcsdecoder.WCSDecoder(u)
#x=decoder.getCoverages()
#if type(x) is wcsdecoder.MpartMime:
#filenames=x.unpackToDir(directory)
##print 'Files from 1.1.0 service written to %s directory'%(directory)
#else:
#filenames=x
#return filenames
#TO DO: Handle rest of the WCS 1.1.0 keyword parameters e.g. GridCRS etc.
def getCoverage(self, identifier=None, bbox=None, time=None, format = None, store=False, rangesubset=None, gridbaseCRS=None, gridtype=None, gridCS=None, gridorigin=None, gridoffsets=None, method='Get',**kwargs):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with multi-version implementation
core keyword arguments should be supported cross version
example:
cvg=wcs.getCoverageRequest(identifier=['TuMYrRQ4'], time=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='application/netcdf', store='true')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIMESEQUENCE=2792-06-01T00:00:00.0&FORMAT=application/netcdf
if store = true, returns a coverages XML file
if store = false, returns a multipart mime
"""
if log.isEnabledFor(logging.DEBUG):
log.debug('WCS 1.1.0 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, rangesubset=%s, gridbaseCRS=%s, gridtype=%s, gridCS=%s, gridorigin=%s, gridoffsets=%s, method=%s, other_arguments=%s'%(identifier, bbox, time, format, rangesubset, gridbaseCRS, gridtype, gridCS, gridorigin, gridoffsets, method, str(kwargs)))
if method == 'Get':
method='{http://www.opengis.net/wcs/1.1/ows}Get'
base_url = self.getOperationByName('GetCoverage').methods[method]['url']
#process kwargs
request = {'version': self.version, 'request': 'GetCoverage', 'service':'WCS'}
assert len(identifier) > 0
request['identifier']=identifier
#request['identifier'] = ','.join(identifier)
if bbox:
request['boundingbox']=','.join([repr(x) for x in bbox])
if time:
request['timesequence']=','.join(time)
request['format']=format
request['store']=store
#rangesubset: untested - require a server implementation
if rangesubset:
request['RangeSubset']=rangesubset
#GridCRS structure: untested - require a server implementation
if gridbaseCRS:
request['gridbaseCRS']=gridbaseCRS
if gridtype:
request['gridtype']=gridtype
if gridCS:
request['gridCS']=gridCS
if gridorigin:
request['gridorigin']=gridorigin
if gridoffsets:
request['gridoffsets']=gridoffsets
#anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
#encode and request
data = urlencode(request)
u=openURL(base_url, data, method, self.cookies)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class Operation(object):
"""Abstraction for operation metadata
Implements IOperationMetadata.
"""
def __init__(self, elem):
self.name = elem.get('name')
self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')]
methods = []
for verb in elem.findall('{http://www.opengis.net/wcs/1.1/ows}DCP/{http://www.opengis.net/wcs/1.1/ows}HTTP/*'):
url = verb.attrib['{http://www.w3.org/1999/xlink}href']
methods.append((verb.tag, {'url': url}))
self.methods = dict(methods)
class ServiceIdentification(object):
""" Abstraction for ServiceIdentification Metadata
implements IServiceIdentificationMetadata"""
def __init__(self,elem):
self.service="WCS"
self.version="1.1.0"
self.title=testXMLValue(elem.find('{http://www.opengis.net/ows}Title'))
if self.title is None: #may have used the wcs ows namespace:
self.title=testXMLValue(elem.find('{http://www.opengis.net/wcs/1.1/ows}Title'))
        self.abstract=testXMLValue(elem.find('{http://www.opengis.net/ows}Abstract'))
        if self.abstract is None: #may have used the wcs ows namespace:
            self.abstract=testXMLValue(elem.find('{http://www.opengis.net/wcs/1.1/ows}Abstract'))
self.keywords = [f.text for f in elem.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword')]
#self.link = elem.find('{http://www.opengis.net/wcs/1.1}Service/{http://www.opengis.net/wcs/1.1}OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
if elem.find('{http://www.opengis.net/wcs/1.1/ows}Fees') is not None:
self.fees=elem.find('{http://www.opengis.net/wcs/1.1/ows}Fees').text
else:
self.fees=None
if elem.find('{http://www.opengis.net/wcs/1.1/ows}AccessConstraints') is not None:
self.accessConstraints=elem.find('{http://www.opengis.net/wcs/1.1/ows}AccessConstraints').text
else:
self.accessConstraints=None
class ServiceProvider(object):
""" Abstraction for ServiceProvider metadata
implements IServiceProviderMetadata """
def __init__(self,elem):
name=elem.find('{http://www.opengis.net/ows}ProviderName')
if name is not None:
self.name=name.text
else:
self.name=None
#self.contact=ServiceContact(elem.find('{http://www.opengis.net/ows}ServiceContact'))
self.contact =ContactMetadata(elem)
self.url=self.name # no obvious definitive place for url in wcs, repeat provider name?
class ContactMetadata(object):
''' implements IContactMetadata'''
def __init__(self, elem):
try:
self.name = elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}IndividualName').text
except AttributeError:
self.name = None
try:
self.organization=elem.find('{http://www.opengis.net/ows}ProviderName').text
except AttributeError:
self.organization = None
try:
self.address = elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}DeliveryPoint').text
except AttributeError:
self.address = None
try:
self.city= elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}City').text
except AttributeError:
self.city = None
try:
self.region= elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}AdministrativeArea').text
except AttributeError:
self.region = None
try:
self.postcode= elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}PostalCode').text
except AttributeError:
self.postcode = None
try:
self.country= elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}Country').text
except AttributeError:
self.country = None
try:
self.email = elem.find('{http://www.opengis.net/ows}ServiceContact/{http://www.opengis.net/ows}ContactInfo/{http://www.opengis.net/ows}Address/{http://www.opengis.net/ows}ElectronicMailAddress').text
except AttributeError:
self.email = None
class ContentMetadata(object):
"""Abstraction for WCS ContentMetadata
Implements IContentMetadata
"""
def __init__(self, elem, parent, service):
"""Initialize."""
#TODO - examine the parent for bounding box info.
self._service=service
self._elem=elem
self._parent=parent
self.id=self._checkChildAndParent('{http://www.opengis.net/wcs/1.1}Identifier')
self.description =self._checkChildAndParent('{http://www.opengis.net/wcs/1.1}Description')
self.title =self._checkChildAndParent('{http://www.opengis.net/ows}Title')
self.abstract =self._checkChildAndParent('{http://www.opengis.net/ows}Abstract')
#keywords.
self.keywords=[]
for kw in elem.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword'):
if kw is not None:
self.keywords.append(kw.text)
#also inherit any keywords from parent coverage summary (if there is one)
if parent is not None:
for kw in parent.findall('{http://www.opengis.net/ows}Keywords/{http://www.opengis.net/ows}Keyword'):
if kw is not None:
self.keywords.append(kw.text)
self.boundingBox=None #needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find('{http://www.opengis.net/ows}WGS84BoundingBox')
if b is not None:
lc=b.find('{http://www.opengis.net/ows}LowerCorner').text
uc=b.find('{http://www.opengis.net/ows}UpperCorner').text
self.boundingBoxWGS84 = (
float(lc.split()[0]),float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
# bboxes - other CRS
self.boundingboxes = []
for bbox in elem.findall('{http://www.opengis.net/ows}BoundingBox'):
if bbox is not None:
try:
                    lc=bbox.find('{http://www.opengis.net/ows}LowerCorner').text
                    uc=bbox.find('{http://www.opengis.net/ows}UpperCorner').text
                    boundingBox = (
                        float(lc.split()[0]),float(lc.split()[1]),
                        float(uc.split()[0]), float(uc.split()[1]),
                        bbox.attrib['crs'])
self.boundingboxes.append(boundingBox)
except:
pass
#others not used but needed for iContentMetadata harmonisation
self.styles=None
self.crsOptions=None
#SupportedCRS
self.supportedCRS=[]
for crs in elem.findall('{http://www.opengis.net/wcs/1.1}SupportedCRS'):
self.supportedCRS.append(Crs(crs.text))
#SupportedFormats
self.supportedFormats=[]
for format in elem.findall('{http://www.opengis.net/wcs/1.1}SupportedFormat'):
self.supportedFormats.append(format.text)
#grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
grid=None
#TODO- convert this to 1.1 from 1.0
#if not hasattr(self, 'descCov'):
#self.descCov=self._service.getDescribeCoverage(self.id)
#gridelem= self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}RectifiedGrid')
#if gridelem is not None:
#grid=RectifiedGrid(gridelem)
#else:
#gridelem=self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}Grid')
#grid=Grid(gridelem)
return grid
grid=property(_getGrid, None)
    #time limits/positions require a describeCoverage request, therefore only resolved when requested
def _getTimeLimits(self):
timelimits=[]
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageDescription/')+ns('Domain/')+ns('TemporalDomain/')+ns('TimePeriod/')):
subelems=elem.getchildren()
timelimits=[subelems[0].text,subelems[1].text]
return timelimits
timelimits=property(_getTimeLimits, None)
#TODO timepositions property
def _getTimePositions(self):
return []
timepositions=property(_getTimePositions, None)
def _checkChildAndParent(self, path):
''' checks child coverage summary, and if item not found checks higher level coverage summary'''
try:
value = self._elem.find(path).text
except:
try:
value = self._parent.find(path).text
except:
value = None
return value
|
rbejar/odrl-ogc-cache-policies
|
owslib/coverage/wcs110.py
|
Python
|
mit
| 18,383
|
[
"NetCDF"
] |
ea79c42365bb5e69e36a176b346f3a37f0d166083d9cfd1f8c66c6d5ef3f301a
|
#!/usr/bin/python
def enum(collection,st):
#just like "enumerate" but you define your own starting position.
#this returns indices RELATIVE TO ORIGINAL LIST
i = st
while i < len(collection):
yield (i,collection[i])
i += 1
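# Illustrative example (added comment, not in the original source):
#   list(enum(['a', 'b', 'c'], 1)) -> [(1, 'b'), (2, 'c')]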
def getfirstindex(L,st,value,K):
for pos,t in enum(L,st):
if t[1] > value-K:
return pos #returns first read in the list that CAN contain given bp
return 0
def getlastindex(L,st,value):
for pos,t in enum(L,st):
if t[1] > value:
return pos #returns first read in the list that CANNOT contain given bp
return len(L)
# ^these 2 were inspired by code here: http://stackoverflow.com/questions/946860/using-pythons-list-index-method-on-a-list-of-tuples-or-objects (see answer labeled "10", superperformant)
# function for converting SAM "flags" to bits:
def to_bin(n):
return bin(n)[2:].zfill(11)
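# Illustrative example (added comment, not in the original source):
#   to_bin(16) -> '00000010000'; index 6 of this string is the 0x10
#   (reverse-strand) SAM flag bit, which is what to_bin(read.flag)[6] tests.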
def countReadlets(fname, outfname, k, chromosome, stranded, rev):
import pysam
#from datetime import datetime #for debugging
samfile = pysam.Samfile(fname,"rb")
id_start_end = []
cigar = []
maxpos = 0
minpos = 3000000000
for read in samfile.fetch(chromosome):
if to_bin(read.flag)[6] == '1' and not rev:
strand = "-"
elif to_bin(read.flag)[6] == '1' and rev:
strand = "+"
elif to_bin(read.flag)[6] == '0' and not rev:
strand = "+"
else:
strand = "-"
readstart = read.pos+1
readend = read.aend
if len(read.cigar)>1:
currentloc = readstart
for a in range(len(read.cigar)):
if read.cigar[a][0] == 0: #match: count this segment as covered, obviously
id_start_end.append([read.qname, currentloc, currentloc+read.cigar[a][1]-1, strand])
currentloc = currentloc + read.cigar[a][1]
if read.cigar[a][0] == 1: #insertion - not in reference, so our location doesn't move
continue
if read.cigar[a][0] == 2: #deletion - count this segment as covered, since a read overlaps it.
id_start_end.append([read.qname, currentloc, currentloc+read.cigar[a][1]-1, strand])
currentloc = currentloc + read.cigar[a][1]
if read.cigar[a][0] == 3: #skipped region: don't count and move the location
currentloc = currentloc + read.cigar[a][1]
if read.cigar[a][0] > 3: #I don't think the rest of these flags exist in these alignments, but just make sure.
print("WARNING: you have a cigar flag in here that you haven't accounted for.")
else:
id_start_end.append([read.qname, readstart, readend, strand])
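        # Illustrative example (added comment, not in the original source): a
        # read starting at position 100 with CIGAR [(0, 10), (3, 50), (0, 10)]
        # (i.e. 10M 50N 10M) contributes covered segments [100, 109] and
        # [160, 169]; the 50-bp skipped region is not counted as coverage.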
if read.aend > maxpos:
maxpos = read.aend
if read.pos+1 < minpos:
minpos = read.pos+1
# sort list by starting points, since we've now introduced extra complication...:
import operator
id_start_end.sort(key=operator.itemgetter(1))
# credit: http://stackoverflow.com/questions/5201191/sorting-a-list-of-lists-in-python
if stranded:
fplus = open(outfname+'_plus', 'w')
fminus = open(outfname+'_minus', 'w')
first = 0
last = 0
for z in xrange(1,minpos):
fplus.write("%s\t%s\n" % (z,0))
fminus.write("%s\t%s\n" % (z,0))
for i in xrange(minpos, maxpos+1):
last = getlastindex(id_start_end,first,i)
first = getfirstindex(id_start_end,first,i,k)
if first == last:
fplus.write("%s\t%s\n" % (i,0))
fminus.write("%s\t%s\n" % (i,0))
continue
overlaps = id_start_end[first:last]
readnames_plus = set()
readnames_minus = set()
for j in xrange(len(overlaps)):
if i>=overlaps[j][1] and i<=overlaps[j][2]:
if overlaps[j][3] == "+":
readnames_plus.add(overlaps[j][0])
else:
readnames_minus.add(overlaps[j][0])
numreads_plus = len(readnames_plus) #count readlets only once per read
numreads_minus = len(readnames_minus) #count readlets only once per read
fplus.write("%s\t%s\n" % (i,numreads_plus))
fminus.write("%s\t%s\n" % (i,numreads_minus))
fplus.close()
fminus.close()
else:
f = open(outfname, 'w')
#g = open('keeptrackofiterations','w') #for debugging
first = 0
last = 0
#npos = 0 #for debugging
for z in xrange(1,minpos):
f.write("%s\t%s\n" % (z,0))
for i in xrange(minpos, maxpos+1):
#for i in xrange(maxpos-2999,maxpos+1): #for debugging
#npos = npos+1 #for debugging
#if npos % 10000 == 0: g.write("Did 10000 "+str(datetime.now())+"\n") #for debugging
last = getlastindex(id_start_end,first,i)
first = getfirstindex(id_start_end,first,i,k)
if first == last:
f.write("%s\t%s\n" % (i,0))
continue
overlaps = id_start_end[first:last]
readnames = set()
for j in xrange(len(overlaps)):
if i>=overlaps[j][1] and i<=overlaps[j][2]:
readnames.add(overlaps[j][0])
numreads = len(readnames) #count readlets only once per read
f.write("%s\t%s\n" % (i,numreads))
f.close()
return None
# get arguments from command line
from optparse import OptionParser
opts = OptionParser()
opts.add_option("--file","-f",type="string",help="input file name (must be .bam)")
opts.add_option("--output","-o",type="string",help="output file name")
opts.add_option("--kmer","-k",type="int",help="kmer length")
opts.add_option("--chrom","-c",type="string",help="chromosome to parse")
opts.add_option("--stranded","-s",type="string",help="stranded or reverse-stranded protocol?",default="FALSE")
options,arguments = opts.parse_args()
if options.stranded == "TRUE":
stranded = True
rev = False
elif options.stranded == "REVERSE":
stranded = True
rev = True
elif options.stranded != "FALSE":
    raise ValueError('stranded must either be TRUE, REVERSE, or FALSE')
else:
stranded = False
rev = False
countReadlets(options.file, options.output, options.kmer, options.chrom, stranded, rev)
|
leekgroup/derfinder
|
countReads.py
|
Python
|
mit
| 6,467
|
[
"pysam"
] |
38d6b3dfa136987b13d0af6c4eae85062c0c53e8077b7dec314e1844ac4cc242
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sources.tables Contains table classes.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.table import SmartTable
from ...core.basics.curve import FilterCurve
from ...core.units.parsing import parse_unit as u
from ...core.filter.filter import parse_filter
# -----------------------------------------------------------------
class FWHMTable(FilterCurve):
"""
    This class ...
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param args:
:param kwargs:
"""
# Set properties
kwargs["y_name"] = "FWHM"
kwargs["y_description"] = "FWHM of the PSF"
kwargs["y_unit"] = "arcsec"
# Call the constructor of the base class
super(FWHMTable, self).__init__(*args, **kwargs)
# -----------------------------------------------------------------
def add_fwhm(self, fltr, fwhm):
"""
This function ...
:param fltr:
:param fwhm:
:return:
"""
self.add_point(fltr, fwhm)
# -----------------------------------------------------------------
def fwhm_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return self.value_for_filter(fltr)
# -----------------------------------------------------------------
class GalaxyTable(SmartTable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Check
if "filters" in kwargs: from_astropy = False
else: from_astropy = True
# Get properties
if not from_astropy: filters = kwargs.pop("filters")
else: filters = None
# Call the constructor of the base class
super(GalaxyTable, self).__init__(*args, **kwargs)
# Add column info
if not from_astropy:
# Add columns
self.add_column_info("Index", int, None, "index of the extended source in the catalog")
self.add_column_info("Name", str, None, "name of the galaxy")
for fltr in filters:
column_name = str(fltr) + " flux"
self.add_column_info(column_name, float, u("Jy"), str(fltr) + " flux density")
# -----------------------------------------------------------------
def add_galaxy(self, galaxy):
"""
This function ...
:param galaxy:
:return:
"""
# Setup if necessary
if len(self.colnames) == 0: self._setup()
values = []
index = galaxy.index
name = galaxy.name
# Add index and name
values.append(index)
values.append(name)
# Loop over the filters for which we need a flux
for name in self.colnames:
# Skip
if not name.endswith("flux"): continue
# Filter
#fltr = BroadBandFilter(name.split(" flux")[0])
fltr = parse_filter(name.split(" flux")[0])
# Get flux
if galaxy.sed is not None and fltr in galaxy.sed.filters(): flux = galaxy.sed.photometry_for_filter(fltr)
else: flux = None
# Add the flux to the values
values.append(flux)
# Add a row to the table
self.add_row(values)
# -----------------------------------------------------------------
class StarTable(SmartTable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Check
if "filters" in kwargs: from_astropy = False
else: from_astropy = True
# Get properties
if not from_astropy: filters = kwargs.pop("filters")
else: filters = None
# Call the constructor of the base class
super(StarTable, self).__init__(*args, **kwargs)
# Add column info
if not from_astropy:
self.add_column_info("Index", int, None, "index of the point source in the catalog")
self.add_column_info("Catalog", str, None, "original catalog")
self.add_column_info("ID", str, None, "ID of the point source in the original catalog")
# Loop over the filters
for fltr in filters:
column_name = str(fltr) + " FWHM"
self.add_column_info(column_name, float, u("arcsec"), str(fltr) + " FWHM")
# Loop over the filters
for fltr in filters:
column_name = str(fltr) + " flux"
self.add_column_info(column_name, float, u("Jy"), str(fltr) + " flux density")
# -----------------------------------------------------------------
def add_star(self, star):
"""
This function ...
:param star:
:return:
"""
if len(self.colnames) == 0: self._setup()
values = []
catalog = star.catalog
id = star.id
# Add index, catalog and ID
values.append(star.index)
values.append(catalog)
values.append(id)
# Loop over the filters for which we need a FWHM
for name in self.colnames:
if name == "Index": continue
if name == "Catalog": continue
if name == "ID": continue
# FWHM
if name.endswith("FWHM"):
filter_name = name.split(" FWHM")[0]
# Filter
fltr = parse_filter(filter_name)
#filter_name = str(fltr)
#print(star.fwhms)
if star.fwhms.has_filter(fltr): fwhm = star.fwhms.fwhm_for_filter(fltr)
#if filter_name in star.fwhms: fwhm = star.fwhms[filter_name]
else: fwhm = None
values.append(fwhm)
# Flux
elif name.endswith("flux"):
# Filter
#fltr = BroadBandFilter(name.split(" flux")[0])
fltr = parse_filter(name.split(" flux")[0])
#print(star.sed)
#print(fltr)
# Get flux
flux = star.sed.photometry_for_filter(fltr)
# Add the flux to the values
values.append(flux)
# Unknown
else: raise ValueError("Don't know what value to fill in for column '" + name + "'")
# Add the row
self.add_row(values)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/sources/tables.py
|
Python
|
agpl-3.0
| 7,113
|
[
"Galaxy"
] |
65cd9ec5a328bd236f4ada9e8a24d467d9b0a8e41186b85c25847ef84e31b822
|
# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~
Custom types and data structures.
"""
from __future__ import absolute_import, print_function, unicode_literals
import sys
import time
from collections import defaultdict, Mapping, MutableMapping, MutableSet
from heapq import heapify, heappush, heappop
from functools import partial
from itertools import chain
from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.encoding import safe_str
from kombu.utils.limits import TokenBucket # noqa
from celery.five import items
from celery.utils.functional import LRUCache, first, uniq # noqa
DOT_HEAD = """
{IN}{type} {id} {{
{INp}graph [{attrs}]
"""
DOT_ATTR = '{name}={value}'
DOT_NODE = '{INp}"{0}" [{attrs}]'
DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]'
DOT_ATTRSEP = ', '
DOT_DIRS = {'graph': '--', 'digraph': '->'}
DOT_TAIL = '{IN}}}'
__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph',
'AttributeDictMixin', 'AttributeDict', 'DictAttribute',
'ConfigurationView', 'LimitedSet']
class GraphFormatter(object):
_attr = DOT_ATTR.strip()
_node = DOT_NODE.strip()
_edge = DOT_EDGE.strip()
_head = DOT_HEAD.strip()
_tail = DOT_TAIL.strip()
_attrsep = DOT_ATTRSEP
_dirs = dict(DOT_DIRS)
scheme = {
'shape': 'box',
'arrowhead': 'vee',
'style': 'filled',
'fontname': 'HelveticaNeue',
}
edge_scheme = {
'color': 'darkseagreen4',
'arrowcolor': 'black',
'arrowsize': 0.7,
}
node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'}
term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'}
graph_scheme = {'bgcolor': 'mintcream'}
def __init__(self, root=None, type=None, id=None,
indent=0, inw=' ' * 4, **scheme):
self.id = id or 'dependencies'
self.root = root
self.type = type or 'digraph'
self.direction = self._dirs[self.type]
self.IN = inw * (indent or 0)
self.INp = self.IN + inw
self.scheme = dict(self.scheme, **scheme)
self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root))
def attr(self, name, value):
value = '"{0}"'.format(value)
return self.FMT(self._attr, name=name, value=value)
def attrs(self, d, scheme=None):
d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d)
return self._attrsep.join(
safe_str(self.attr(k, v)) for k, v in items(d)
)
def head(self, **attrs):
return self.FMT(
self._head, id=self.id, type=self.type,
attrs=self.attrs(attrs, self.graph_scheme),
)
def tail(self):
return self.FMT(self._tail)
def label(self, obj):
return obj
def node(self, obj, **attrs):
return self.draw_node(obj, self.node_scheme, attrs)
def terminal_node(self, obj, **attrs):
return self.draw_node(obj, self.term_scheme, attrs)
def edge(self, a, b, **attrs):
return self.draw_edge(a, b, **attrs)
def _enc(self, s):
return s.encode('utf-8', 'ignore')
def FMT(self, fmt, *args, **kwargs):
return self._enc(fmt.format(
*args, **dict(kwargs, IN=self.IN, INp=self.INp)
))
def draw_edge(self, a, b, scheme=None, attrs=None):
return self.FMT(
self._edge, self.label(a), self.label(b),
dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme),
)
def draw_node(self, obj, scheme=None, attrs=None):
return self.FMT(
self._node, self.label(obj), attrs=self.attrs(attrs, scheme),
)
class CycleError(Exception):
"""A cycle was detected in an acyclic graph."""
class DependencyGraph(object):
"""A directed acyclic graph of objects and their dependencies.
Supports a robust topological sort
to detect the order in which they must be handled.
Takes an optional iterator of ``(obj, dependencies)``
tuples to build the graph from.
.. warning::
Does not support cycle detection.
"""
def __init__(self, it=None, formatter=None):
self.formatter = formatter or GraphFormatter()
self.adjacent = {}
if it is not None:
self.update(it)
def add_arc(self, obj):
"""Add an object to the graph."""
self.adjacent.setdefault(obj, [])
def add_edge(self, A, B):
"""Add an edge from object ``A`` to object ``B``
(``A`` depends on ``B``)."""
self[A].append(B)
def connect(self, graph):
"""Add nodes from another graph."""
self.adjacent.update(graph.adjacent)
def topsort(self):
"""Sort the graph topologically.
:returns: a list of objects in the order
in which they must be handled.
"""
graph = DependencyGraph()
components = self._tarjan72()
NC = dict((node, component)
for component in components
for node in component)
for component in components:
graph.add_arc(component)
for node in self:
node_c = NC[node]
for successor in self[node]:
successor_c = NC[successor]
if node_c != successor_c:
graph.add_edge(node_c, successor_c)
return [t[0] for t in graph._khan62()]
def valency_of(self, obj):
"""Return the valency (degree) of a vertex in the graph."""
try:
l = [len(self[obj])]
except KeyError:
return 0
for node in self[obj]:
l.append(self.valency_of(node))
return sum(l)
def update(self, it):
"""Update the graph with data from a list
of ``(obj, dependencies)`` tuples."""
tups = list(it)
for obj, _ in tups:
self.add_arc(obj)
for obj, deps in tups:
for dep in deps:
self.add_edge(obj, dep)
def edges(self):
"""Return generator that yields for all edges in the graph."""
return (obj for obj, adj in items(self) if adj)
def _khan62(self):
"""Khans simple topological sort algorithm from '62
See http://en.wikipedia.org/wiki/Topological_sorting
"""
count = defaultdict(lambda: 0)
result = []
for node in self:
for successor in self[node]:
count[successor] += 1
ready = [node for node in self if not count[node]]
while ready:
node = ready.pop()
result.append(node)
for successor in self[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
result.reverse()
return result
def _tarjan72(self):
"""Tarjan's algorithm to find strongly connected components.
See http://bit.ly/vIMv3h.
"""
result, stack, low = [], [], {}
def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in self[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
stack[stack_pos:] = []
result.append(component)
for item in component:
low[item] = len(self)
for node in self:
visit(node)
return result
def to_dot(self, fh, formatter=None):
"""Convert the graph to DOT format.
:param fh: A file, or a file-like object to write the graph to.
"""
seen = set()
draw = formatter or self.formatter
P = partial(print, file=fh)
def if_not_seen(fun, obj):
if draw.label(obj) not in seen:
P(fun(obj))
seen.add(draw.label(obj))
P(draw.head())
for obj, adjacent in items(self):
if not adjacent:
if_not_seen(draw.terminal_node, obj)
for req in adjacent:
if_not_seen(draw.node, obj)
P(draw.edge(obj, req))
P(draw.tail())
def format(self, obj):
return self.formatter(obj) if self.formatter else obj
def __iter__(self):
return iter(self.adjacent)
def __getitem__(self, node):
return self.adjacent[node]
def __len__(self):
return len(self.adjacent)
def __contains__(self, obj):
return obj in self.adjacent
def _iterate_items(self):
return items(self.adjacent)
items = iteritems = _iterate_items
def __repr__(self):
return '\n'.join(self.repr_node(N) for N in self)
def repr_node(self, obj, level=1, fmt='{0}({1})'):
output = [fmt.format(obj, self.valency_of(obj))]
if obj in self:
for other in self[obj]:
d = fmt.format(other, self.valency_of(other))
output.append(' ' * level + d)
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
return '\n'.join(output)
class AttributeDictMixin(object):
"""Augment classes with a Mapping interface by adding attribute access.
I.e. `d.key -> d[key]`.
"""
def __getattr__(self, k):
"""`d.key -> d[key]`"""
try:
return self[k]
except KeyError:
raise AttributeError(
'{0!r} object has no attribute {1!r}'.format(
type(self).__name__, k))
def __setattr__(self, key, value):
"""`d[key] = value -> d.key = value`"""
self[key] = value
class AttributeDict(dict, AttributeDictMixin):
"""Dict subclass with attribute access."""
pass
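# Illustrative usage (added comment, not in the original source):
#   d = AttributeDict(x=1)
#   d.x      # -> 1, attribute access falls through to d['x']
#   d.y = 2  # equivalent to d['y'] = 2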
class DictAttribute(object):
"""Dict interface to attributes.
`obj[k] -> obj.k`
`obj[k] = val -> obj.k = val`
"""
obj = None
def __init__(self, obj):
object.__setattr__(self, 'obj', obj)
def __getattr__(self, key):
return getattr(self.obj, key)
def __setattr__(self, key, value):
return setattr(self.obj, key, value)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
setattr(self.obj, key, value)
def __contains__(self, key):
return hasattr(self.obj, key)
def _iterate_keys(self):
return iter(dir(self.obj))
iterkeys = _iterate_keys
def __iter__(self):
return self._iterate_keys()
def _iterate_items(self):
for key in self._iterate_keys():
yield key, getattr(self.obj, key)
iteritems = _iterate_items
def _iterate_values(self):
for key in self._iterate_keys():
yield getattr(self.obj, key)
itervalues = _iterate_values
if sys.version_info[0] == 3: # pragma: no cover
items = _iterate_items
keys = _iterate_keys
values = _iterate_values
else:
def keys(self):
return list(self)
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
MutableMapping.register(DictAttribute)
class ConfigurationView(AttributeDictMixin):
"""A view over an applications configuration dicts.
Custom (but older) version of :class:`collections.ChainMap`.
If the key does not exist in ``changes``, the ``defaults`` dicts
are consulted.
:param changes: Dict containing changes to the configuration.
:param defaults: List of dicts containing the default configuration.
"""
changes = None
defaults = None
_order = None
def __init__(self, changes, defaults):
self.__dict__.update(changes=changes, defaults=defaults,
_order=[changes] + defaults)
def add_defaults(self, d):
if not isinstance(d, Mapping):
d = DictAttribute(d)
self.defaults.insert(0, d)
self._order.insert(1, d)
def __getitem__(self, key):
for d in self._order:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.changes[key] = value
def first(self, *keys):
return first(None, (self.get(key) for key in keys))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def clear(self):
"""Remove all changes, but keep defaults."""
self.changes.clear()
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, *args, **kwargs):
return self.changes.update(*args, **kwargs)
def __contains__(self, key):
return any(key in m for m in self._order)
def __bool__(self):
return any(self._order)
__nonzero__ = __bool__ # Py2
def __repr__(self):
return repr(dict(items(self)))
def __iter__(self):
return self._iterate_keys()
def __len__(self):
# The logic for iterating keys includes uniq(),
# so to be safe we count by explicitly iterating
return len(set().union(*self._order))
def _iter(self, op):
# defaults must be first in the stream, so values in
# changes takes precedence.
return chain(*[op(d) for d in reversed(self._order)])
def _iterate_keys(self):
return uniq(self._iter(lambda d: d))
iterkeys = _iterate_keys
def _iterate_items(self):
return ((key, self[key]) for key in self)
iteritems = _iterate_items
def _iterate_values(self):
return (self[key] for key in self)
itervalues = _iterate_values
if sys.version_info[0] == 3: # pragma: no cover
keys = _iterate_keys
items = _iterate_items
values = _iterate_values
else: # noqa
def keys(self):
return list(self._iterate_keys())
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
MutableMapping.register(ConfigurationView)
class LimitedSet(object):
"""Kind-of Set with limitations.
Good for when you need to test for membership (`a in set`),
    but the list might become too big.
:keyword maxlen: Maximum number of members before we start
evicting expired members.
:keyword expires: Time in seconds, before a membership expires.
"""
def __init__(self, maxlen=None, expires=None, data=None, heap=None):
self.maxlen = maxlen
self.expires = expires
self._data = {} if data is None else data
self._heap = [] if heap is None else heap
# make shortcuts
self.__len__ = self._heap.__len__
self.__iter__ = self._heap.__iter__
self.__contains__ = self._data.__contains__
def add(self, value, now=time.time):
"""Add a new member."""
        # The offset adjusts the effective length of the list so that an
        # expired item can be evicted before the new value is inserted,
        # keeping the heap in the correct order.
self.purge(1, offset=1)
inserted = now()
self._data[value] = inserted
heappush(self._heap, (inserted, value))
def clear(self):
"""Remove all members"""
self._data.clear()
self._heap[:] = []
def discard(self, value):
"""Remove membership by finding value."""
try:
itime = self._data[value]
except KeyError:
return
try:
            self._heap.remove((itime, value))  # heap entries are (timestamp, value)
except ValueError:
pass
self._data.pop(value, None)
pop_value = discard # XXX compat
def purge(self, limit=None, offset=0, now=time.time):
"""Purge expired items."""
H, maxlen = self._heap, self.maxlen
if not maxlen:
return
# If the data/heap gets corrupted and limit is None
# this will go into an infinite loop, so limit must
# have a value to guard the loop.
limit = len(self) + offset if limit is None else limit
i = 0
while len(self) + offset > maxlen:
if i >= limit:
break
try:
item = heappop(H)
except IndexError:
break
if self.expires:
if now() < item[0] + self.expires:
heappush(H, item)
break
try:
self._data.pop(item[1])
except KeyError: # out of sync with heap
pass
i += 1
def update(self, other, heappush=heappush):
if isinstance(other, LimitedSet):
self._data.update(other._data)
self._heap.extend(other._heap)
heapify(self._heap)
else:
for obj in other:
self.add(obj)
def as_dict(self):
return self._data
def __eq__(self, other):
return self._heap == other._heap
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'LimitedSet({0})'.format(len(self))
def __iter__(self):
return (item[1] for item in self._heap)
def __len__(self):
return len(self._heap)
def __contains__(self, key):
return key in self._data
def __reduce__(self):
return self.__class__, (
self.maxlen, self.expires, self._data, self._heap,
)
MutableSet.register(LimitedSet)
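# Minimal usage sketch for LimitedSet: once ``maxlen`` is exceeded, the
# oldest member is evicted. Timestamps are passed explicitly so the
# example is deterministic.
def _demo_limited_set():
    s = LimitedSet(maxlen=2)
    s.add('a', now=lambda: 1.0)
    s.add('b', now=lambda: 2.0)
    s.add('c', now=lambda: 3.0)   # evicts the oldest member, 'a'
    assert 'a' not in s and 'b' in s and 'c' in s
    assert len(s) == 2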
|
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/celery/datastructures.py
|
Python
|
bsd-3-clause
| 18,310
|
[
"VisIt"
] |
031696865d814c7fc62c02d09b303d2dd26c6fb850b15b5aadaae3614d219703
|
"""
Learning functions for Projections.
For example, CFProjectionLearningFunctions compute a new set of
ConnectionFields when given an input and output pattern and a set of
ConnectionField objects.
"""
import numpy as np
import param
from topo.base.cf import CFPLearningFn
from topo.base.sheet import activity_type
from topo.base.functionfamily import Hebbian,LearningFn
# Imported here so that all ProjectionLearningFns will be in the same package
from topo.base.cf import CFPLF_Identity,CFPLF_Plugin # pyflakes:ignore (API import)
class CFPLF_EuclideanHebbian(CFPLearningFn):
"""
Hebbian CFProjection learning rule based on Euclidean distance.
Learning is driven by the distance from the input pattern to the
weights, scaled by the current activity. To implement a Kohonen
SOM algorithm, the activity should be the neighborhood kernel
centered around the winning unit, as implemented by KernelMax.
"""
# CEBERRORALERT: ignoring the sheet mask
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
# This learning function does not need to scale the learning
# rate like some do, so it does not use constant_sum_connection_rate()
cfs = iterator.flatcfs
rows,cols = output_activity.shape
for r in xrange(rows):
for c in xrange(cols):
flati = r*cols+c
out = output_activity.flat[flati]
if out !=0:
rate = learning_rate * out
cf = cfs[flati]
X = cf.get_input_matrix(input_activity)
cf.weights += rate * (X - cf.weights)
# CEBHACKALERT: see ConnectionField.__init__()
cf.weights *= cf.mask
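# The inner update above is the standard SOM-style rule w += rate * (x - w).
# A standalone numpy sketch of one such step, outside the ConnectionField
# machinery (both arrays are hypothetical stand-ins for cf.weights and the
# patch returned by cf.get_input_matrix):
def _euclidean_hebbian_step(weights, input_patch, learning_rate, out_activity):
    rate = learning_rate * out_activity
    return weights + rate * (input_patch - weights)
# e.g. _euclidean_hebbian_step(np.zeros((3, 3)), np.ones((3, 3)), 0.5, 1.0)
# moves every weight halfway from 0.0 toward the input value 1.0.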
#### JABHACKALERT: Untested
##class CFPLF_BCM(CFPLearningFn):
## """
## Bienenstock, Cooper, and Munro (1982) learning rule with sliding threshold.
##
## (See Dayan and Abbott, 2001, equation 8.12, 8.13).
##
## Activities change only when there is both pre- and post-synaptic activity.
## Threshold is adjusted based on recent firing rates.
## """
## single_cf_fn = param.ClassSelector(LearningFn,default=BCMFixed())
##
## unit_threshold_0=param.Number(default=0.5,bounds=(0,None),
## doc="Initial value of threshold between LTD and LTP; actual value computed based on recent history.")
## unit_threshold_learning_rate=param.Number(default=0.1,bounds=(0,None),
## doc="Amount by which the unit_threshold is adjusted for each activity calculation.")
##
## def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
## cfs = iterator.proj._cfs
## # Initialize thresholds the first time we learn the size of the output_activity.
## if not hasattr(self,'unit_thresholds'):
## self.unit_thresholds=np.ones(output_activity.shape, dtype=np.float32)*self.unit_threshold_0
##
## rows,cols = output_activity.shape
##
## # JABALERT: Is this correct?
## single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
##
## # avoid evaluating these references each time in the loop
## single_cf_fn = self.single_cf_fn
## for r in xrange(rows):
## for c in xrange(cols):
## cf = cfs[r][c]
## input_act = cf.get_input_matrix(input_activity)
## unit_activity = output_activity[r,c]
## threshold=self.unit_thresholds[r,c]
## #print cf.weights, type(cf.weights)
## #print input_act, type(input_act)
## #print single_connection_learning_rate,unit_activity,threshold, (unit_activity-threshold)
## cf.weights += (single_connection_learning_rate * unit_activity * (unit_activity-threshold)) * input_act
## self.unit_thresholds[r,c] += self.unit_threshold_learning_rate*(unit_activity*unit_activity-threshold)
##
## # CEBHACKALERT: see ConnectionField.__init__()
## cf.weights *= cf.mask
class CFPLF_Trace(CFPLearningFn):
"""
LearningFn that incorporates a trace of recent activity,
not just the current activity.
Based on P. Foldiak (1991), "Learning Invariance from
Transformation Sequences", Neural Computation 3:194-200. Also see
Sutton and Barto (1981) and Wallis and Rolls (1997).
Incorporates a decay term to keep the weight vector bounded, and
so it does not normally require any output_fn normalization for
stability.
NOT YET TESTED.
"""
trace_strength=param.Number(default=0.5,bounds=(0.0,1.0),
doc="How much the learning is dominated by the activity trace, relative to the current value.")
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
doc="LearningFn that will be applied to each CF individually.")
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
##Initialise traces to zero if they don't already exist
if not hasattr(self,'traces'):
self.traces=np.zeros(output_activity.shape,activity_type)
for cf,i in iterator():
unit_activity = output_activity.flat[i]
# print "unit activity is",unit_activity
# print "self trace is",self.traces[r,c]
new_trace = (self.trace_strength*unit_activity)+((1-self.trace_strength)*self.traces.flat[i])
# print "and is now",new_trace
self.traces.flat[i] = new_trace
cf.weights += single_connection_learning_rate * new_trace * \
(cf.get_input_matrix(input_activity) - cf.weights)
#CEBHACKALERT: see ConnectionField.__init__()
cf.weights *= cf.mask
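# The activity trace above is the exponential blend
#     trace_t = s * act_t + (1 - s) * trace_{t-1},   with s = trace_strength.
# A standalone sketch of that recurrence on a toy activity sequence:
def _activity_trace(activities, trace_strength=0.5):
    trace, traces = 0.0, []
    for act in activities:
        trace = trace_strength * act + (1.0 - trace_strength) * trace
        traces.append(trace)
    return traces
# _activity_trace([1.0, 0.0, 0.0]) -> [0.5, 0.25, 0.125]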
class CFPLF_OutstarHebbian(CFPLearningFn):
"""
CFPLearningFunction applying the specified (default is Hebbian)
    single_cf_fn to each CF, where normalization is done in an outstar manner.
Presumably does not need a separate output_fn for normalization.
NOT YET TESTED.
"""
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
doc="LearningFn that will be applied to each CF individually.")
outstar_wsum = None
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
# avoid evaluating these references each time in the loop
single_cf_fn = self.single_cf_fn
outstar_wsum = np.zeros(input_activity.shape)
for cf,i in iterator():
single_cf_fn(cf.get_input_matrix(input_activity),
output_activity.flat[i], cf.weights, single_connection_learning_rate)
# Outstar normalization
wrows,wcols = cf.weights.shape
for wr in xrange(wrows):
for wc in xrange(wcols):
outstar_wsum[wr][wc] += cf.weights[wr][wc]
# CEBHACKALERT: see ConnectionField.__init__()
cf.weights *= cf.mask
class HomeoSynaptic(CFPLearningFn):
"""
Learning function using homeostatic synaptic scaling from
Sullivan & de Sa, "Homeostatic Synaptic Scaling in Self-Organizing Maps",
Neural Networks (2006), 19(6-7):734-43.
Does not necessarily require output_fn normalization for stability.
"""
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
doc="LearningFn that will be applied to each CF individually")
beta_n = param.Number(default=0.01,bounds=(0,None),
doc="homeostatic learning rate")
beta_c = param.Number(default=0.005,bounds=(0,None),
doc="time window over which the neuron's firing rate is averaged")
activity_target = param.Number(default=0.1,bounds=(0,None),
doc="Target average activity")
#debug = param.Boolean(default=False,doc="Print average activity values")
#beta_n = param.Number(default=0.00033,bounds=(0,None),doc="Homeostatic learning rate") #Too small?
#beta_c = param.Number(default=0.000033,bounds=(0,None),doc="Time window over which the neuron's firing rate is averaged")
def __init__(self,**params):
super(HomeoSynaptic,self).__init__(**params)
self.temp_hist = []
self.ave_hist = []
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
"""
Update the value of the given weights matrix based on the
input_activity matrix (of the same size as the weights matrix)
and the response of this unit (the unit_activity), governed by
a per-connection learning rate.
"""
if not hasattr(self,'averages'):
self.averages = np.ones(output_activity.shape, dtype=np.float) * 0.1
# normalize initial weights to 1.0
for cf,i in iterator():
current_norm_value = 1.0*np.sum(abs(cf.weights.ravel()))
if current_norm_value != 0:
factor = (1.0/current_norm_value)
cf.weights *= factor
# compute recent average of output activity
self.averages = self.beta_c * output_activity + (1.0-self.beta_c) * self.averages
activity_norm = 1.0 + self.beta_n * \
((self.averages - self.activity_target)/self.activity_target)
single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
# avoid evaluating these references each time in the loop
single_cf_fn = self.single_cf_fn
for cf,i in iterator():
single_cf_fn(cf.get_input_matrix(input_activity),
output_activity.flat[i], cf.weights, single_connection_learning_rate)
# homeostatic normalization
cf.weights /= activity_norm.flat[i]
# CEBHACKALERT: see ConnectionField.__init__()
cf.weights *= cf.mask
# For analysis only; can be removed (in which case also remove the initializations above)
# CEBALERT: I changed [0][7] to [0]!
self.ave_hist.append(self.averages.flat[0])
        self.temp_hist.append(np.sum(abs(iterator.flatcfs[0].weights.ravel())))
class CFPLF_PluginScaled(CFPLearningFn):
"""
CFPLearningFunction applying the specified single_cf_fn to each CF.
Scales the single-connection learning rate by a scaling factor
that is different for each individual unit. Thus each individual
connection field uses a different learning rate.
"""
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
doc="Accepts a LearningFn that will be applied to each CF individually.")
learning_rate_scaling_factor = param.Parameter(default=None,
doc="Matrix of scaling factors for scaling the learning rate of each CF individually.")
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
"""Apply the specified single_cf_fn to every CF."""
if self.learning_rate_scaling_factor is None:
self.learning_rate_scaling_factor = np.ones(output_activity.shape)
single_cf_fn = self.single_cf_fn
single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
for cf,i in iterator():
sc_learning_rate = self.learning_rate_scaling_factor.flat[i] * single_connection_learning_rate
single_cf_fn(cf.get_input_matrix(input_activity),
output_activity.flat[i], cf.weights, sc_learning_rate)
# CEBHACKALERT: see ConnectionField.__init__() re. mask & output fn
cf.weights *= cf.mask
def update_scaling_factor(self, new_scaling_factor):
"""Update the single-connection learning rate scaling factor."""
self.learning_rate_scaling_factor = new_scaling_factor
__all__ = [
"CFPLF_Identity",
"CFPLF_Plugin",
"CFPLF_EuclideanHebbian",
"CFPLF_Trace",
"CFPLF_OutstarHebbian",
"HomeoSynaptic",
"CFPLF_PluginScaled",
]
|
Tasignotas/topographica_mirror
|
topo/learningfn/projfn.py
|
Python
|
bsd-3-clause
| 12,281
|
[
"NEURON"
] |
6971e3751a5e3557455dcf23b51ac94d951d2343641c25385885b734284bb055
|
#!/usr/bin/env python
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
import numpy
import argparse
import os
import math
from Scientific.IO import NetCDF
def main():
parser = argparse.ArgumentParser(
prog="gaussian_bump",
description="""Create a Gaussian bump in a netcdf file"""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'-d',
'--domain',
help="Domain size. Defualt is 1000x1000m",
default=1000.0,
type=float
)
parser.add_argument(
'-b',
'--bumpheight',
help="Distance between seabed and top of bump. Default is 100m",
default=100,
type=float
)
parser.add_argument(
'-r',
'--resolution',
help="Resolution of output netcdf file. Default is 10m",
default=10.0,
type=float
)
parser.add_argument(
'--shift',
help="Shift the bump in the 'north-south' direction, wrapping along the top/bottom",
default = 0,
type=float
)
parser.add_argument(
'--spread',
help="Spread of Gaussian",
default = 100.0,
type=float
)
parser.add_argument(
'output_file',
metavar='output_file',
nargs=1,
help='The output netcdf file'
)
args = parser.parse_args()
verbose = args.verbose
output_file = args.output_file[0]
domain_size = args.domain
bump_height = args.bumpheight
resolution = args.resolution
shift = args.shift
spread = args.spread
nPoints = int(domain_size / resolution)
shift = int(shift/resolution)
if (verbose):
print nPoints, shift
# generate regular grid
X, Y = numpy.meshgrid(numpy.linspace(0.0, domain_size, nPoints), numpy.linspace(0.0, domain_size, nPoints))
Z = numpy.zeros((nPoints,nPoints))
#for each point calculate the Gaussian
centre = domain_size/2.0
for i in range(0,len(X)):
for j in range(0,len(X[0])):
r = ((X[i][j]-centre)**2/(2.0*spread**2) + (Y[i][j]-centre)**2/(2.0*spread**2))
Z[i][j] = bump_height * math.exp(-1.0*r)
if (not shift == 0.0):
Z = numpy.roll(Z, shift, 0)
f = NetCDF.NetCDFFile(output_file, 'w')
xDim = f.createDimension("X", nPoints)
yDim = f.createDimension("Y", nPoints)
x = f.createVariable("X","d",("X",))
y = f.createVariable("Y","d",("Y",))
zVar = f.createVariable("Z","d",("X","Y"))
x.assignValue(X[0,0:nPoints])
y.assignValue(Y[0:nPoints,0])
zVar.assignValue(Z)
f.close()
os.system('grdreformat '+output_file+' '+output_file)
os.system('rm -f 1_contour.* 50_contour.*')
os.system('gdal_contour -fl 1.0 NETCDF:"'+output_file+'":z 1_contour.shp')
os.system('gdal_contour -fl 50.0 NETCDF:"'+output_file+'":z 50_contour.shp')
if __name__ == "__main__":
main()
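# The nested loop in main() evaluates the Gaussian point by point. An
# equivalent vectorized formulation (a sketch reusing the meshgrid arrays
# X, Y and the same centre/spread/bump_height parameters) would be:
def gaussian_bump_vectorized(X, Y, centre, spread, bump_height):
    r = ((X - centre)**2 + (Y - centre)**2) / (2.0 * spread**2)
    return bump_height * numpy.exp(-r)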
|
adamcandy/qgis-plugins-meshing
|
dev/tests/gaussian_bump.py
|
Python
|
lgpl-2.1
| 4,413
|
[
"Gaussian",
"NetCDF"
] |
d14449e105b51f8978061b1cd1749f48c73ea8aff98f467355bfee38cecd41a0
|
"""Module symbol-table generator"""
from compiler import ast
from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
from compiler.misc import mangle
import types
import sys
MANGLE_LEN = 256
class Scope:
# XXX how much information do I need about each name?
def __init__(self, name, module, klass=None):
self.name = name
self.module = module
self.defs = {}
self.uses = {}
self.globals = {}
self.params = {}
self.frees = {}
self.cells = {}
self.children = []
# nested is true if the class could contain free variables,
# i.e. if it is nested within another function.
self.nested = None
self.generator = None
self.klass = None
if klass is not None:
for i in range(len(klass)):
if klass[i] != '_':
self.klass = klass[i:]
break
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def mangle(self, name):
if self.klass is None:
return name
return mangle(name, self.klass)
def add_def(self, name):
self.defs[self.mangle(name)] = 1
def add_use(self, name):
self.uses[self.mangle(name)] = 1
def add_global(self, name):
name = self.mangle(name)
if self.uses.has_key(name) or self.defs.has_key(name):
pass # XXX warn about global following def/use
if self.params.has_key(name):
raise SyntaxError, "%s in %s is global and parameter" % \
(name, self.name)
self.globals[name] = 1
self.module.add_def(name)
def add_param(self, name):
name = self.mangle(name)
self.defs[name] = 1
self.params[name] = 1
def get_names(self):
d = {}
d.update(self.defs)
d.update(self.uses)
d.update(self.globals)
return d.keys()
def add_child(self, child):
self.children.append(child)
def get_children(self):
return self.children
def DEBUG(self):
print >> sys.stderr, self.name, self.nested and "nested" or ""
print >> sys.stderr, "\tglobals: ", self.globals
print >> sys.stderr, "\tcells: ", self.cells
print >> sys.stderr, "\tdefs: ", self.defs
print >> sys.stderr, "\tuses: ", self.uses
print >> sys.stderr, "\tfrees:", self.frees
def check_name(self, name):
"""Return scope of name.
The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
"""
if self.globals.has_key(name):
return SC_GLOBAL
if self.cells.has_key(name):
return SC_CELL
if self.defs.has_key(name):
return SC_LOCAL
if self.nested and (self.frees.has_key(name) or
self.uses.has_key(name)):
return SC_FREE
if self.nested:
return SC_UNKNOWN
else:
return SC_GLOBAL
def get_free_vars(self):
if not self.nested:
return ()
free = {}
free.update(self.frees)
for name in self.uses.keys():
if not (self.defs.has_key(name) or
self.globals.has_key(name)):
free[name] = 1
return free.keys()
def handle_children(self):
for child in self.children:
frees = child.get_free_vars()
globals = self.add_frees(frees)
for name in globals:
child.force_global(name)
def force_global(self, name):
"""Force name to be global in scope.
Some child of the current node had a free reference to name.
When the child was processed, it was labelled a free
        variable. Now that all its enclosing scopes have been
processed, the name is known to be a global or builtin. So
walk back down the child chain and set the name to be global
rather than free.
Be careful to stop if a child does not think the name is
free.
"""
self.globals[name] = 1
if self.frees.has_key(name):
del self.frees[name]
for child in self.children:
if child.check_name(name) == SC_FREE:
child.force_global(name)
def add_frees(self, names):
"""Process list of free vars from nested scope.
Returns a list of names that are either 1) declared global in the
parent or 2) undefined in a top-level parent. In either case,
the nested scope should treat them as globals.
"""
child_globals = []
for name in names:
sc = self.check_name(name)
if self.nested:
if sc == SC_UNKNOWN or sc == SC_FREE \
or isinstance(self, ClassScope):
self.frees[name] = 1
elif sc == SC_GLOBAL:
child_globals.append(name)
elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
self.cells[name] = 1
elif sc != SC_CELL:
child_globals.append(name)
else:
if sc == SC_LOCAL:
self.cells[name] = 1
elif sc != SC_CELL:
child_globals.append(name)
return child_globals
def get_cell_vars(self):
return self.cells.keys()
class ModuleScope(Scope):
__super_init = Scope.__init__
def __init__(self):
self.__super_init("global", self)
class FunctionScope(Scope):
pass
class GenExprScope(Scope):
__super_init = Scope.__init__
__counter = 1
def __init__(self, module, klass=None):
i = self.__counter
self.__counter += 1
self.__super_init("generator expression<%d>"%i, module, klass)
self.add_param('[outmost-iterable]')
def get_names(self):
        keys = Scope.get_names(self)
return keys
class LambdaScope(FunctionScope):
__super_init = Scope.__init__
__counter = 1
def __init__(self, module, klass=None):
i = self.__counter
self.__counter += 1
self.__super_init("lambda.%d" % i, module, klass)
class ClassScope(Scope):
__super_init = Scope.__init__
def __init__(self, name, module):
self.__super_init(name, module, name)
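# Minimal sketch of name classification in a Scope: names added with
# add_def() resolve as SC_LOCAL, add_global() forces SC_GLOBAL, and a
# name that is merely used in a non-nested scope falls back to SC_GLOBAL
# (i.e. it must be a global or builtin).
def _demo_scope_classification():
    scope = ModuleScope()
    scope.add_def('x')
    scope.add_use('y')
    scope.add_global('z')
    assert scope.check_name('x') == SC_LOCAL
    assert scope.check_name('y') == SC_GLOBAL
    assert scope.check_name('z') == SC_GLOBAL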
class SymbolVisitor:
def __init__(self):
self.scopes = {}
self.klass = None
    # nodes that define new scopes
def visitModule(self, node):
scope = self.module = self.scopes[node] = ModuleScope()
self.visit(node.node, scope)
visitExpression = visitModule
def visitFunction(self, node, parent):
if node.decorators:
self.visit(node.decorators, parent)
parent.add_def(node.name)
for n in node.defaults:
self.visit(n, parent)
scope = FunctionScope(node.name, self.module, self.klass)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
self.scopes[node] = scope
self._do_args(scope, node.argnames)
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def visitGenExpr(self, node, parent):
        scope = GenExprScope(self.module, self.klass)
if parent.nested or isinstance(parent, FunctionScope) \
or isinstance(parent, GenExprScope):
scope.nested = 1
self.scopes[node] = scope
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def visitGenExprInner(self, node, scope):
for genfor in node.quals:
self.visit(genfor, scope)
self.visit(node.expr, scope)
def visitGenExprFor(self, node, scope):
self.visit(node.assign, scope, 1)
self.visit(node.iter, scope)
for if_ in node.ifs:
self.visit(if_, scope)
def visitGenExprIf(self, node, scope):
self.visit(node.test, scope)
def visitLambda(self, node, parent, assign=0):
# Lambda is an expression, so it could appear in an expression
# context where assign is passed. The transformer should catch
# any code that has a lambda on the left-hand side.
assert not assign
for n in node.defaults:
self.visit(n, parent)
scope = LambdaScope(self.module, self.klass)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
self.scopes[node] = scope
self._do_args(scope, node.argnames)
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def _do_args(self, scope, args):
for name in args:
if type(name) == types.TupleType:
self._do_args(scope, name)
else:
scope.add_param(name)
def handle_free_vars(self, scope, parent):
parent.add_child(scope)
scope.handle_children()
def visitClass(self, node, parent):
parent.add_def(node.name)
for n in node.bases:
self.visit(n, parent)
scope = ClassScope(node.name, self.module)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
if node.doc is not None:
scope.add_def('__doc__')
scope.add_def('__module__')
self.scopes[node] = scope
prev = self.klass
self.klass = node.name
self.visit(node.code, scope)
self.klass = prev
self.handle_free_vars(scope, parent)
# name can be a def or a use
# XXX a few calls and nodes expect a third "assign" arg that is
# true if the name is being used as an assignment. only
# expressions contained within statements may have the assign arg.
def visitName(self, node, scope, assign=0):
if assign:
scope.add_def(node.name)
else:
scope.add_use(node.name)
# operations that bind new names
def visitFor(self, node, scope):
self.visit(node.assign, scope, 1)
self.visit(node.list, scope)
self.visit(node.body, scope)
if node.else_:
self.visit(node.else_, scope)
def visitFrom(self, node, scope):
for name, asname in node.names:
if name == "*":
continue
scope.add_def(asname or name)
def visitImport(self, node, scope):
for name, asname in node.names:
i = name.find(".")
if i > -1:
name = name[:i]
scope.add_def(asname or name)
def visitGlobal(self, node, scope):
for name in node.names:
scope.add_global(name)
def visitAssign(self, node, scope):
"""Propagate assignment flag down to child nodes.
        The Assign node doesn't itself contain the variables being
assigned to. Instead, the children in node.nodes are visited
with the assign flag set to true. When the names occur in
those nodes, they are marked as defs.
Some names that occur in an assignment target are not bound by
the assignment, e.g. a name occurring inside a slice. The
visitor handles these nodes specially; they do not propagate
the assign flag to their children.
"""
for n in node.nodes:
self.visit(n, scope, 1)
self.visit(node.expr, scope)
def visitAssName(self, node, scope, assign=1):
scope.add_def(node.name)
def visitAssAttr(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
def visitSubscript(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
for n in node.subs:
self.visit(n, scope, 0)
def visitSlice(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
if node.lower:
self.visit(node.lower, scope, 0)
if node.upper:
self.visit(node.upper, scope, 0)
def visitAugAssign(self, node, scope):
# If the LHS is a name, then this counts as assignment.
# Otherwise, it's just use.
self.visit(node.node, scope)
if isinstance(node.node, ast.Name):
self.visit(node.node, scope, 1) # XXX worry about this
self.visit(node.expr, scope)
# prune if statements if tests are false
_const_types = types.StringType, types.IntType, types.FloatType
def visitIf(self, node, scope):
for test, body in node.tests:
if isinstance(test, ast.Const):
if type(test.value) in self._const_types:
if not test.value:
continue
self.visit(test, scope)
self.visit(body, scope)
if node.else_:
self.visit(node.else_, scope)
# a yield statement signals a generator
def visitYield(self, node, scope):
scope.generator = 1
self.visit(node.value, scope)
def sort(l):
l = l[:]
l.sort()
return l
def list_eq(l1, l2):
return sort(l1) == sort(l2)
if __name__ == "__main__":
import sys
from compiler import parseFile, walk
import symtable
def get_names(syms):
return [s for s in [s.get_name() for s in syms.get_symbols()]
if not (s.startswith('_[') or s.startswith('.'))]
for file in sys.argv[1:]:
print file
f = open(file)
buf = f.read()
f.close()
syms = symtable.symtable(buf, file, "exec")
mod_names = get_names(syms)
tree = parseFile(file)
s = SymbolVisitor()
walk(tree, s)
# compare module-level symbols
names2 = s.scopes[tree].get_names()
if not list_eq(mod_names, names2):
print
print "oops", file
print sort(mod_names)
print sort(names2)
sys.exit(-1)
d = {}
d.update(s.scopes)
del d[tree]
scopes = d.values()
del d
for s in syms.get_symbols():
if s.is_namespace():
l = [sc for sc in scopes
if sc.name == s.get_name()]
if len(l) > 1:
print "skipping", s.get_name()
else:
if not list_eq(get_names(s.get_namespace()),
l[0].get_names()):
print s.get_name()
print sort(get_names(s.get_namespace()))
print sort(l[0].get_names())
sys.exit(-1)
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/compiler/symbols.py
|
Python
|
gpl-2.0
| 14,591
|
[
"VisIt"
] |
03a02b2c63b777f129b2223cc36159b3f8ec2582a74f6f1114040c9fd58a6ec0
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .externals import six
from .preprocessing import LabelBinarizer
from .preprocessing import binarize
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.fixes import in1d
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running statistics.
        sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
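# A small sanity sketch for the online update in _update_mean_variance:
# folding a second chunk of data into running moments should reproduce
# np.mean/np.var over the concatenated data (toy random arrays below).
def _check_online_mean_variance():
    rng = np.random.RandomState(0)
    X1, X2 = rng.rand(5, 3), rng.rand(7, 3)
    mu, var = np.mean(X1, axis=0), np.var(X1, axis=0)
    mu, var = GaussianNB._update_mean_variance(X1.shape[0], mu, var, X2)
    X = np.vstack((X1, X2))
    assert np.allclose(mu, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))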
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
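# A toy sketch of the Laplace smoothing in _update_feature_log_prob: with
# per-class counts [2, 0] and alpha=1, P(x_0|c) = (2+1)/(2+0+2) = 0.75 and
# P(x_1|c) = (0+1)/4 = 0.25.
def _check_multinomial_smoothing():
    clf = MultinomialNB(alpha=1.0)
    clf.fit(np.array([[2, 0], [0, 3]]), np.array([0, 1]))
    assert np.allclose(np.exp(clf.feature_log_prob_[0]), [0.75, 0.25])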
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
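# A sketch of the identity used by _joint_log_likelihood above: for binary
# x with per-feature log-probs p = log P(x_i=1|c) and q = log P(x_i=0|c),
#     sum_i [x_i*p_i + (1 - x_i)*q_i] = x . (p - q) + sum_i q_i,
# which is exactly the dot product plus neg_prob.sum(axis=1) computed there.
def _check_bernoulli_identity():
    rng = np.random.RandomState(0)
    x = rng.randint(2, size=4).astype(float)
    p = np.log(rng.uniform(0.1, 0.9, size=4))
    q = np.log(1 - np.exp(p))
    assert np.allclose(np.sum(x * p + (1 - x) * q), x.dot(p - q) + q.sum())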
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/naive_bayes.py
|
Python
|
mit
| 28,917
|
[
"Gaussian"
] |
ae6ff96992e2da31797b17ce9dd01f79715257e68c9fd17ac75251f3e89844e9
|
#!/usr/bin/env python
import argparse
import pysam
def Parser():
parser = argparse.ArgumentParser(description='miRNAs counts and coverages')
parser.add_argument('-a', '--alignment', metavar='FILE', type=str,
dest='alignment_file', help='Alignment bam file')
parser.add_argument('--gff', metavar='FILE', type=str, dest='gff_file',
help='GFF3 describing both pre-miRNAs\
and mature miRNAs')
parser.add_argument('-q', '--quality_threshold', type=int,
dest='quality_threshold',
help='Quality threshold for coverage (default=10)',
default=10)
parser.add_argument('-p', '--pre_mirs', type=str, dest='pre_mirs',
help='pre-miRNAs count file path', metavar='FILE')
parser.add_argument('-m', '--mirs', type=str, dest='mirs',
help='mature miRNA count file path', metavar='FILE')
parser.add_argument('--lattice', metavar='FILE', type=str, dest='lattice',
help='Output file for the lattice dataframe.')
args = parser.parse_args()
return args
def get_pre_mir_counts(bamfile):
"""
    Takes an AlignmentFile object and returns a dictionary of counts for reads
aligning with pre_mirs (as keys)
"""
count = dict()
for ref_name in bamfile.references:
count[ref_name] = bamfile.count(reference=ref_name)
return count
def get_pre_mir_coverage(bamfile, quality=10):
"""
    Takes an AlignmentFile object and returns a dictionary of lists
of coverage along the coordinates of pre_mirs (as keys)
"""
coverage = dict()
for ref_name, ref_len in zip(bamfile.references, bamfile.lengths):
coverage[ref_name] = bamfile.count_coverage(reference=ref_name,
start=0, end=ref_len,
quality_threshold=quality)
""" Add the 4 coverage values """
coverage[ref_name] = [sum(x) for x in
zip(*coverage[ref_name])]
return coverage
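# pysam's count_coverage() returns four per-position arrays, one per
# nucleotide (A, C, G, T); summing across them, as above, gives the total
# depth at each position. A tiny standalone illustration of that step:
def _total_depth(acgt_counts):
    # e.g. ([1, 0], [0, 2], [0, 0], [1, 1]) -> [2, 3]
    return [sum(x) for x in zip(*acgt_counts)]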
def get_mir_counts(bamfile, gff_file):
"""
    Takes an AlignmentFile and a GFF file and computes for
each 'miRNA' region of the gff the number of reads that hit it
returns a dict[mir_name] = count
"""
counts = dict()
for line in open(gff_file, 'r'):
if line[0] != '#':
gff_fields = line[:-1].split("\t")
if gff_fields[2] == 'miRNA':
mir_name = gff_fields[0]
premir_name = gff_fields[8].split('=')[-1]
mir_start = int(gff_fields[3])
mir_end = int(gff_fields[4])
                # GFF is 1-based inclusive; pysam is 0-based half-open,
                # so [start, end] maps to [start-1, end).
                counts[mir_name] = bamfile.count(reference=premir_name,
                                                 start=mir_start-1,
                                                 end=mir_end)
return counts
def write_dataframe_coverage(countdict, outfile):
"""
Takes a dict[pre_mir reference name] = [coverage list]
and writes a dataframe with columns:
    Mir_hairpin, Offset, Norm_offset, Count and Norm_count
in the outfile
"""
F = open(outfile, 'w')
F.write('Mir_hairpin\tOffset\tNorm_offset\tCount\tNorm_count\n')
for ref in sorted(countdict):
"""
For each reference name in mirs,
write the coverage of each of its positions
"""
maximum = max(countdict[ref])
reference_length = len(countdict[ref])
for pos, c in enumerate(countdict[ref]):
""" Compute and write value for each reference position"""
F.write('%s\t%s\t%s\t%s\t%s\n' % (ref, str(pos + 1),
str(float(pos+1)/reference_length), str(float(c)),
str(float(c)/maximum) if maximum != 0 else '0'))
F.close()
def write_counts(countdict, outfile):
"""
Takes a dict[<gene_type name>]=count and
writes a count table
"""
F = open(outfile, 'w')
for gene in sorted(countdict):
F.write('%s\t%s\n' % (gene, str(countdict[gene])))
F.close()
def main():
args = Parser()
bamfile = pysam.AlignmentFile(args.alignment_file, 'rb', check_sq=False)
if args.pre_mirs:
pre_mirs = get_pre_mir_counts(bamfile)
write_counts(pre_mirs, args.pre_mirs)
if args.lattice:
pre_mirs_coverage = get_pre_mir_coverage(bamfile,
args.quality_threshold)
write_dataframe_coverage(pre_mirs_coverage, args.lattice)
if args.mirs:
mirs = get_mir_counts(bamfile, args.gff_file)
write_counts(mirs, args.mirs)
if __name__ == '__main__':
main()
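# A minimal sketch of the GFF -> pysam coordinate conversion used in
# get_mir_counts(): a 1-based inclusive interval [start, end] maps to the
# 0-based half-open interval [start - 1, end).
def _gff_to_pysam(start, end):
    return start - 1, end
# e.g. a GFF feature spanning bases 3..5 covers pysam positions 2, 3 and 4.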
|
drosofff/tools-artbio
|
tools/mircounts/mircounts.py
|
Python
|
mit
| 4,816
|
[
"pysam"
] |
64c3f43e8790e2e269ad12e0cc6c794b2b43bfa5eb3d2e04a00db0a45523743a
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import errno
import gc
import os
import os.path as osp
import re
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from a folder other than spyder's
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# For issue 7447
try:
from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
except Exception:
QQuickWindow = QSGRendererInterface = None
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('Tellurium_splash.png'), 'png'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, __website_url__,
get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
DEBUG, debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Utility functions
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See issue 7447 for the details.
"""
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
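# Minimal sketch of how this helper is meant to be called (the 'opengl'
# option name is an assumption here, and Qt requires these attributes to be
# set before the QApplication instance exists):
#
#     opengl = CONF.get('main', 'opengl')        # e.g. 'software'
#     if opengl != 'automatic':
#         set_opengl_implementation(opengl)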
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.project
self.window_title = options.window_title
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console.
                # In DEV mode Ctrl+C doesn't quit, because it helps to
                # capture the traceback when Spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker; references needed to prevent
        # segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.plugins.shortcuts import ShortcutsConfigPage
from spyder.plugins.runconfig import RunConfigPage
from spyder.plugins.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.plugins.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
        # otherwise the external tools menu is lost after leaving the setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
        # Set a shutdown QTimer if running in CI
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
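        # Example of the method-name derivation above: for text 'Select All',
        # textseq == ['Select', 'All'] and method_name == 'select' + 'All'
        # == 'selectAll', the widget slot name that global_callback
        # dispatches to.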
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"), "linguist")
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
        except (ImportError, AssertionError):
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
        except (ImportError, AssertionError):
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
            if API != 'pyside':
                slot = lambda _checked, path=path: programs.start_file(path)
            else:
                slot = lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
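        # Example of the pattern above (hypothetical file name): for
        # docfn == 'NumPy-ref.chm' the groups are
        # ('NumPy', None, None, '-ref', None, 'chm'), so pname == 'NumPy'
        # and the file gets its own help menu entry.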
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
try:
# Not all the plugins have the check_compatibility method
# i.e Breakpoints, Profiler, Pylint
check = plugin.check_compatibility()[0]
except AttributeError:
check = True
if check:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
#----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In Mac OS X 10.7 our app is not displayed after initialization
        # (I don't know why because this doesn't happen when started from
        # the terminal), so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
        # Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
        # Update lock status of dockwidgets (panes)
self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
self.apply_panes_settings()
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += u" [DEBUG MODE %d]" % DEBUG
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify if the window/position value is valid
# with the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
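    # Shape of the tuple returned above (illustrative values only):
    #
    #     ('000000ff00...',    # hexstate: saveState() as hex, or None
    #      (1440, 900),        # window_size
    #      (745, 411),         # prefs_dialog_size
    #      (10, 10),           # pos
    #      True,               # is_maximized
    #      False)              # is_fullscreen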
def get_window_settings(self):
"""Return current window settings
Symetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
        self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
            for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in self.widgetlist:
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.maximize_dockwidget(restore=True)
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
        # layouts are organized by columns, each column is organized by rows
        # widths have to add up to 1.0; heights per column have to add up to 1.0
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, helper, explorer_file,
finder] + plugins,
[console_int, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int]],
# column 1
[[explorer_variable, history, outline, finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
        for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
try:
action.setChecked(widget.dockwidget.isVisible())
            except Exception:
pass
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
        for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
        for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
                # The 0.95 factor is to adjust height based on useful
                # estimated area in the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
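    # Worked example of the height computation above: for a 900 px tall
    # window and a column with 'height fraction' [0.55, 0.45], the first row
    # gets int(0.55 * 900 * 0.95) == 470 px and the second
    # int(0.45 * 900 * 0.95) == 384 px; the single-shot timer then lets
    # layout_fix_timer release these temporary min/max constraints.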
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
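    # Example of the cyclic arithmetic above: with
    # layout_index == ['default', 0, 1] and current_index == 2, 'next'
    # selects (2 + 1) % 3 == 0 (back to 'default') and 'previous' selects
    # (2 - 1) % 3 == 1.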
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
        ql_actions = [create_action(self, _('Spyder Default Layout'),
                                    triggered=lambda:
                                    self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an
# inactive layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The defaults layouts will always be regenerated unless there was
# an overwrite, either by rewriting with same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (ie, where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.widgets.editor import TextEditBaseWidget
from spyder.widgets.ipythonconsole import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
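# Typical call pattern (illustrative; see update_edit_menu and
# update_search_menu below for the real uses):
#
#   widget, props = self.get_focus_widget_properties()
#   if props is not None:
#       console, not_readonly, readwrite_editor = props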
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in self.thirdparty_plugins:
if not plugin.closing_plugin(cancelable):
return False
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def toggle_lock_dockwidgets(self, value):
"""Lock/Unlock dockwidgets"""
self.dockwidgets_locked = value
self.apply_panes_settings()
CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above "for" loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See Issue #4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
r = QApplication.desktop().screenGeometry()
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""Create About Spyder dialog with general information."""
versions = get_versions()
# Show Git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
msgBox = QMessageBox(self)
msgBox.setText(
"""
<b>Spyder {spyder_ver}</b> {revision}
<br>The Scientific Python Development Environment |
<a href="{website_url}">Spyder-IDE.org</a>
<br>Copyright © 2009-2019 Spyder Project Contributors and
<a href="{github_url}/blob/master/AUTHORS.txt">others</a>
<br>Distributed under the terms of the
<a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
<p>Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
<br>Developed by the
<a href="{github_url}/graphs/contributors">international
Spyder community</a>.
<br>Many thanks to all the Spyder beta testers and dedicated users.
<p>For help with Spyder errors and crashes, please read our
<a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
reports and feature requests, visit our
<a href="{github_url}">Github site</a>.
For project discussion, see our
<a href="{forum_url}">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development.
The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
<p>Python {python_ver} {bitness}-bit | Qt {qt_ver} |
{qt_api} {qt_api_ver} | {os_name} {os_ver}
<small><p>Certain source files under other compatible permissive
licenses and/or originally by other authors.
Spyder 3 theme icons derived from
<a href="https://fontawesome.com/">Font Awesome</a> 4.7
(© 2016 David Gandy; SIL OFL 1.1).
Most Spyder 2 theme icons sourced from the
<a href="https://www.everaldo.com">Crystal Project iconset</a>
(© 2006-2007 Everaldo Coelho; LGPL 2.1+).
Other icons from
<a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
(© 2013 Yusuke Kamiyamane; CC-BY 3.0),
the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
Silk icon set</a> 1.3 (© 2006 Mark James; CC-BY 2.5), and
the <a href="https://www.kde.org/">KDE Oxygen icons</a>
(© 2007 KDE Artists; LGPL 3.0+).</small>
<p>See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
file for full legal information.
"""
.format(spyder_ver=versions['spyder'],
revision=revlink,
website_url=__website_url__,
github_url=__project_url__,
trouble_url=__trouble_url__,
forum_url=__forum_url__,
python_ver=versions['python'],
bitness=versions['bitness'],
qt_ver=versions['qt'],
qt_api=versions['qt_api'],
qt_api_ver=versions['qt_api_ver'],
os_name=versions['system'],
os_ver=versions['release'])
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setIconPixmap(APP_ICON.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(
Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
msgBox.exec_()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.widgets.editor import TextEditBaseWidget
from spyder.widgets.ipythonconsole import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and optionally set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('main', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.plugins.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # "while 1" was marginally faster than "while True" on Python 2
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
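# Minimal client sketch for the server above (illustrative only; the
# real sender lives elsewhere in Spyder):
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', CONF.get('main', 'open_files_port')))
#   client.sendall(u'/path/to/file.py'.encode('utf-8'))
#   client.recv(1) # the server acknowledges with b' '
#   client.close()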
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder is reset to its default settings on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback` = False is used on startup, so only positive feedback is
# given. `feedback` = True is used after startup (when using the menu
# action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking the menu if check-on-startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for Spyder updates on GitHub releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
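# Illustrative check (hypothetical session): if
# set_opengl_implementation('software') ran before the QApplication was
# created, self._test_setting_opengl('software') returns True while the
# 'desktop' and 'gles' options return False.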
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=None):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** For Pytest ****
# We need to create MainWindow **here** to avoid passing pytest
# options to Spyder
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, optparse won't be able to exit if --help option is passed
options, args = get_options()
# **** Set OpenGL implementation to use ****
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize or bool(DEBUG))
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Show crash dialog ****
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# **** Create main window ****
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('main', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
sys-bio/tellurium
|
spyder_mod/Spyder 3.3.6/site-packages/spyder/app/mainwindow.py
|
Python
|
apache-2.0
| 138,222
|
[
"CRYSTAL",
"VisIt"
] |
554aea01d8c41088f2aa1ee1cd9038850b6c9307a40a7933cf4be3228268dcb0
|
#=======================================================================
# sim_utils.py
#=======================================================================
import warnings
import greenlet
from ..ast_helpers import get_method_ast
from ...datatypes.SignalValue import SignalValue
from ast_visitor import (
DetectLoadsAndStores,
DetectDecorators,
DetectIncorrectValueNext,
DetectMissingValueNext
)
#-----------------------------------------------------------------------
# collect_signals
#-----------------------------------------------------------------------
# Utility function to collect all the Signal type objects (ports,
# wires, constants) in the model.
def collect_signals( model ):
#self.metrics.reg_model( model )
signals = set( model.get_ports() + model.get_wires() )
for m in model.get_submodules():
signals.update( collect_signals( m ) )
return signals
#-----------------------------------------------------------------------
# signals_to_nets
#-----------------------------------------------------------------------
# Generate nets describing structural connections in the model. Each
# net describes a set of Signal objects which have been interconnected,
# either directly or indirectly, by calls to connect().
def signals_to_nets( signals ):
nets = []
slice_connects = set()
#---------------------------------------------------------------------
# valid_connection
#---------------------------------------------------------------------
# Utility function to filter only supported connections (ports/wires),
# ignore slices and constants.
def valid_connection( c ):
if c.src_slice is not None or c.dest_slice is not None:
# TODO: collect slice connections somewhere else
slice_connects.add( c )
return False
else:
return True
#---------------------------------------------------------------------
# iter_dfs
#---------------------------------------------------------------------
# Iterative Depth-First-Search algorithm, borrowed from Listing 5-5
# in 'Python Algorithms': http://www.apress.com/9781430232377/
def iter_dfs( s ):
S, Q = set(), []
Q.append( s )
while Q:
u = Q.pop()
if u in S: continue
S.add( u )
connected_signals = [ x.other( u ) for x in u.connections
if valid_connection( x ) ]
Q.extend( connected_signals )
#yield u
return S
# Initially signals contains all the Signal type objects in the model.
# We perform a depth-first search on the connections of each Signal
# object, and remove connected objects from the signals set. The
# result is a collection of nets describing structural connections in
# the design. Each independent net will later be transformed into a
# single SignalValue object.
while signals:
s = signals.pop()
net = iter_dfs( s )
for i in net:
#if i is not s: signals.remove( i )
signals.discard( i )
nets.append( net )
return nets, slice_connects
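# Usage sketch (illustrative; assumes a ``model`` whose ports were wired
# up with connect() and then elaborated):
#
#   signals = collect_signals( model )
#   nets, slice_connects = signals_to_nets( signals )
#   for net in nets:
#     print( len( net ), "signals will share a single SignalValue" )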
#---------------------------------------------------------------------
# insert_signal_values
#---------------------------------------------------------------------
# Transform each net into a single SignalValue object. Model attributes
# currently referencing Signal objects will be modified to reference
# the SignalValue object of their associated net instead.
def insert_signal_values( sim, nets ):
# Utility functions which create SignalValue callbacks.
#-------------------------------------------------------------------
# create_comb_update_cb
#-------------------------------------------------------------------
def create_comb_update_cb( sim, svalue ):
def notify_sim_comb_update():
sim.add_event( svalue )
return notify_sim_comb_update
#-------------------------------------------------------------------
# create_seq_update_cb
#-------------------------------------------------------------------
def create_seq_update_cb( sim, svalue ):
def notify_sim_seq_update():
sim._register_queue.append( svalue )
return notify_sim_seq_update
# Each grouping represents a single SignalValue object. Perform a swap
# so that all attributes currently pointing to Signal objects in this
# grouping instead point to the SignalValue.
for group in nets:
# Get an element out of the set and use it to determine the bitwidth
# of the net, needed to create a properly sized SignalValue object.
# TODO: no peek() so have to pop() then reinsert it! Another way?
# TODO: what about BitStructs?
temp = group.pop()
group.add( temp )
# TODO: should this be visible to sim?
svalue = temp.dtype()
svalue._next = temp.dtype()
#svalue._DEBUG_signal_names = group
# Add a callback to the SignalValue to notify SimulationTool every
# time a sequential update occurs (.next is written).
# TODO: currently all signals get this, necessary?
svalue.notify_sim_seq_update = create_seq_update_cb ( sim, svalue )
# Create a callback for the SignalValue to notify SimulationTool
# every time a combinational update occurs (.value is written).
# We just store the callback for now, only add it later if we detect
# that a combinational block is sensitive to us.
svalue._ucb = create_comb_update_cb( sim, svalue )
# Modify model attributes currently referencing Signal objects to
# reference SignalValue objects instead.
for x in group:
# Set the value of the SignalValue object if we encounter a
# constant (check for Constant object instead?)
if isinstance( x._signalvalue, int ):
svalue.write_value( x._signalvalue )
svalue.constant = True
# Otherwise swap the value
else:
# We need 'in locals()' because of the nested function above,
# see: http://stackoverflow.com/a/4484946
exec( "x.parent.{} = svalue".format( x.name ) ) in locals()
# Also give signals a pointer to the SignalValue object.
# (Needed for VCD tracing and slice logic generator).
x._signalvalue = svalue
#---------------------------------------------------------------------
# register_seq_blocks
#---------------------------------------------------------------------
# Register all decorated @tick and @posedge_clk functions.
# Sequential logic blocks get executed any time cycle() is called.
def register_seq_blocks( model ):
all_models = []
def create_model_list( current ):
all_models.append( current )
for m in current.get_submodules():
create_model_list( m )
create_model_list( model )
sequential_blocks = []
for i in all_models:
for func in i.get_tick_blocks() + i.get_posedge_clk_blocks():
# Grab the AST and src code of each function
tree, src = get_method_ast( func )
# Check there were no mistakes in use of .value/.next
DetectIncorrectValueNext( func, 'value' ).visit( tree )
DetectMissingValueNext ( func, 'next' ).visit( tree )
# If function is decorated with tick_fl, wrap it with a greenlet
if 'tick_fl' in DetectDecorators().enter( tree ):
func = _pausable_tick( func )
sequential_blocks.append( func )
for func in i.get_combinational_blocks():
tree, _ = get_method_ast( func )
DetectIncorrectValueNext( func, 'next' ).visit( tree )
DetectMissingValueNext ( func, 'value' ).visit( tree )
return sequential_blocks
#---------------------------------------------------------------------
# register_comb_blocks
#---------------------------------------------------------------------
# Register all decorated @combinational functions with the simulator.
# Combinational logic blocks are registered with SignalValue objects
# and get added to the event queue when values are updated.
def register_comb_blocks( model, event_queue ):
# Get the sensitivity list of each event driven (combinational) block
# TODO: do before or after we swap value nodes?
for func in model.get_combinational_blocks():
tree, _ = get_method_ast( func )
loads, stores = DetectLoadsAndStores().enter( tree )
for name in loads:
_add_senses( func, model, name )
# Iterate through all @combinational decorated function names we
# detected, retrieve their associated function pointer, then add
# entries for each item in the function's sensitivity list to
# svalue_callbacks
# TODO: merge this code with above to reduce mem of data structures?
# TODO: sensitivity_list contains duplicate items if a signal is
# accessed via slices or bitstruct accesses, use set instead?
for func_ptr, sensitivity_list in model._newsenses.items():
func_ptr.id = event_queue.get_id()
func_ptr.cb = func_ptr
#self.metrics.reg_eval( func_ptr.cb )
for signal_value in sensitivity_list:
# Only add "notify_sim" funcs if @comb blocks are sensitive to us
signal_value.notify_sim_comb_update = signal_value._ucb
# Prime the simulation by putting all events on the event_queue
# This will make sure all nodes come out of reset in a consistent
# state. TODO: put this in reset() instead?
signal_value.register_callback( func_ptr )
event_queue.enq( func_ptr.cb, func_ptr.id )
#self._DEBUG_signal_cbs[ signal_value ].append( func_ptr )
# Recursively perform for submodules
for m in model.get_submodules():
register_comb_blocks( m, event_queue )
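# Illustrative sketch (hypothetical model code): given
#
#   @s.combinational
#   def logic():
#     s.out.value = s.in_.value + 1
#
# DetectLoadsAndStores reports the load "s.in_.value", _add_senses maps
# it to the SignalValue behind s.in_, and any later write to that
# SignalValue re-enqueues logic() on the event queue.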
#-----------------------------------------------------------------------
# _add_senses
#-----------------------------------------------------------------------
# Utility function to recursively add signals/lists of signals to
# the sensitivity list.
def _add_senses( func, model, name ):
obj = _attr_name_to_object( model, name )
# If name_to_object returned a tuple, this is a list inside of a
# for loop. Iteratively go through each object in the list and
# recursively call add_senses on it.
if isinstance( obj, tuple ):
obj_list, list_name, attr = obj
for i, o in enumerate( obj_list ):
obj_name = "{}[{}]{}".format( list_name, i, attr )
_add_senses( func, model, obj_name )
# If this is a signal value, add it to the sensitivity list
elif isinstance( obj, SignalValue ):
# Distinguish between attributes storing signals (InPort/OutPort/Wire)
# and SignalValues (e.g., Bits), by checking the _ucb attribute.
target_bits = obj._target_bits
if hasattr( target_bits, '_ucb' ):
model._newsenses[ func ].append( target_bits )
elif model._debug:
warnings.warn( "Cannot add SignalValue '{}' to sensitivity list."
"".format( name ), Warning )
#-----------------------------------------------------------------------
# _attr_name_to_object
#-----------------------------------------------------------------------
# Utility function to turn attributes/names acquired from the ast
# into Python objects
# TODO: should never use eval... but this is easy
# TODO: how to handle when self is neither 's' nor 'self'?
# TODO: how to handle temps!
def _attr_name_to_object( model, name ):
# Temporarily creates the names 'self' and 's' in the current
# scope. SUPER HACKY
self = s = model
# If slice or list, get name components previous to indexing.
# Initialize `extra` so the list branch below cannot hit an undefined
# name when there was no '[?]' marker.
extra = ''
if '[?]' in name:
name, extra = name.split('[?]', 1)
# Try to return the Python object attached to the name. If the
# object is not a SignalValue or a list, we can't add it to
# the sensitivity list. Sometimes this is okay (eg. constants),
# but sometimes this indicates an error in the user's code, so
# display a warning.
# In the case of a list, we need to reconstruct the name of each
# item in the list so we can try to add it to the sensitivity
# list. Return a tuple containing the list object, the list name
# and the attribute string that appears after the list indexing.
try:
x = eval( name )
if isinstance( x, SignalValue ): return x
elif isinstance( x, list ): return ( x, name, extra )
else: raise NameError
except NameError:
if model._debug:
warnings.warn( "Cannot add variable '{}' to sensitivity list."
"".format( name ), Warning )
return None
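# Example (hypothetical name): for "s.in_[?].msg" we eval "s.in_" to get
# the list object and return ( obj_list, "s.in_", ".msg" ); _add_senses
# then rebuilds "s.in_[0].msg", "s.in_[1].msg", ... for each element.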
#-----------------------------------------------------------------------
# create_slice_callbacks
#-----------------------------------------------------------------------
# All ConnectionEdges that contain bit slicing need to be turned into
# combinational blocks. This significantly simplifies the connection
# graph update logic.
def create_slice_callbacks( slice_connects, event_queue ):
for c in slice_connects:
src = c.src_node._signalvalue
# If the slice is connected to a Constant, don't create a callback.
# Just write the constant value now.
if isinstance( src, int ):
dest = c.dest_node._signalvalue
dest_addr = c.dest_slice if c.dest_slice is not None else slice( None )
dest[ dest_addr ].v = src
# If slice is connected to another Signal, create a callback
# and put it on the combinational event queue.
else:
func_ptr = _create_slice_cb_closure( c )
signal_value = c.src_node._signalvalue
signal_value.register_slice( func_ptr )
func_ptr.id = event_queue.get_id()
func_ptr.cb = func_ptr
event_queue.enq( func_ptr.cb, func_ptr.id )
#self.metrics.reg_eval( func_ptr.cb, is_slice = True )
#self._DEBUG_signal_cbs[ signal_value ].append( func_ptr )
#-----------------------------------------------------------------------
# _create_slice_cb_closure
#-----------------------------------------------------------------------
# Utility function to create our callback
def _create_slice_cb_closure( c ):
src = c.src_node._signalvalue
dest = c.dest_node._signalvalue
  src_addr = c.src_slice if c.src_slice is not None else slice( None )
  dest_bits = dest[ c.dest_slice ] if c.dest_slice is not None else dest
def slice_cb():
# We need to slice the src each time. This is because writing
    # to a BitSlice will update the Bits it was sliced from, but
# not vice versa.
dest_bits.v = src[ src_addr ]
return slice_cb
#---------------------------------------------------------------------
# _pausable_tick
#---------------------------------------------------------------------
# Experimental support for creating tick blocks where we can pause the
# execution within the tick block. This avoids the need for creating a
# GreenletWrapper explicitly.
def _pausable_tick( func ):
# The inner_wrapper function is the one which we will wrap in a
# greenlet. It calls the tick function forever. It pauses after each
# call to the tick function, but the tick function itself can also
# pause.
def inner_wrapper():
while True:
# Call the tick function
func()
# Yield so we always only do one tick per cycle
greenlet.greenlet.getcurrent().parent.switch(0)
# Create a greenlet and save it with the model. Note that we
# currently only allow a single pausable_tick per model.
func._pausable_tick = greenlet.greenlet(inner_wrapper)
# The outer_wrapper is what will become the new tick function. This
# is what gets added to the tick list.
def outer_wrapper():
func._pausable_tick.switch()
return outer_wrapper
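# Sketch of intended usage (hypothetical model code, not from this module):
#
#   def tick():
#     do_first_half()
#     greenlet.greenlet.getcurrent().parent.switch(0)  # pause mid-tick
#     do_second_half()
#
#   tick_list.append( _pausable_tick( tick ) )  # 'tick_list' is assumed here
#
# Each call of the returned wrapper resumes the greenlet where it last
# paused, so one tick body can be spread across several simulation cycles.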
#-----------------------------------------------------------------------
# register_cffi_updates
#-----------------------------------------------------------------------
def register_cffi_updates( model ):
def visit_models( m ):
if hasattr( m, '_cffi_update' ):
for port, func_ptr in m._cffi_update.items():
signal_value = port._signalvalue
signal_value.register_slice( func_ptr )
else:
for subm in m.get_submodules():
visit_models( subm )
visit_models( model )
|
Abhinav117/pymtl
|
pymtl/tools/simulation/sim_utils.py
|
Python
|
bsd-3-clause
| 15,918
|
[
"VisIt"
] |
47adcbd38401effc214b36ebb38d76fc509efcc4ba9bb7df7624f09cc3127da3
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Person/_HasAssociation.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasAssociation
#
#-------------------------------------------------------------------------
class HasAssociation(Rule):
"""Rule that checks for a person with a personal association"""
labels = [ _('Number of instances:'), _('Number must be:')]
name = _('People with <count> associations')
description = _("Matches people with a certain number of associations")
category = _('General filters')
def prepare(self, db):
# things we want to do just once, not for every handle
if self.list[1] == 'lesser than':
self.count_type = 0
elif self.list[1] == 'greater than':
self.count_type = 2
else:
self.count_type = 1 # "equal to"
self.selected_count = int(self.list[0])
def apply(self, db, person):
count = len(person.get_person_ref_list())
if self.count_type == 0: # "lesser than"
return count < self.selected_count
elif self.count_type == 2: # "greater than"
return count > self.selected_count
# "equal to"
return count == self.selected_count
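# Illustrative example (not part of the original rule): with
# self.list = ['3', 'greater than'], prepare() sets count_type = 2 and
# apply() matches any person whose person-reference list holds more than
# three entries.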
|
pmghalvorsen/gramps_branch
|
gramps/gen/filters/rules/person/_hasassociation.py
|
Python
|
gpl-2.0
| 2,598
|
[
"Brian"
] |
ab94befef1bede0b4a1014abeecfe6927ed374d4b22f120a58111792c61cc29b
|
from ase.db.core import connect
|
suttond/MODOI
|
ase/db/__init__.py
|
Python
|
lgpl-3.0
| 32
|
[
"ASE"
] |
86588af360e23b4b22c6cbcaa9618431783cc1131cd9521e6262e242e53f91ef
|
# $HeadURL$
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
from DIRAC.Core.Utilities.List import sortList
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.StorageManagementSystem.DB.StorageManagementDB import THROTTLING_STEPS, THROTTLING_TIME
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
import re
AGENT_NAME = 'StorageManagement/StageRequestAgent'
class StageRequestAgent( AgentModule ):
def initialize( self ):
self.stagerClient = StorageManagerClient()
self.dataIntegrityClient = DataIntegrityClient()
#self.storageDB = StorageManagementDB()
# pin lifetime = 1 day
self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
# Resources helper
self.resources = Resources()
    # This sets the Default Proxy to be used as the one defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
return S_OK()
def execute( self ):
# Get the current submitted stage space and the amount of pinned space for each storage element
res = self.getStorageUsage()
if not res['OK']:
return res
return self.submitStageRequests()
def getStorageUsage( self ):
""" Fill the current Status of the SE Caches from the DB
"""
self.storageElementCache = {}
res = self.stagerClient.getSubmittedStagePins()
if not res['OK']:
gLogger.fatal( "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
return res
self.storageElementUsage = res['Value']
if self.storageElementUsage:
gLogger.info( "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:" )
for storageElement in sortList( self.storageElementUsage.keys() ):
seDict = self.storageElementUsage[storageElement]
# Convert to GB for printout
seDict['TotalSize'] = seDict['TotalSize'] / ( 1000 * 1000 * 1000.0 )
gLogger.info( "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] ) )
if not self.storageElementUsage:
gLogger.info( "StageRequest.getStorageUsage: No active stage/pin requests found." )
return S_OK()
def submitStageRequests( self ):
""" This manages the following transitions of the Replicas
* Waiting -> Offline (if the file is not found Cached)
* Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are no more Waiting replicas)
"""
# Retry Replicas that have not been Staged in a previous attempt
res = self._getMissingReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = res['Value']['SEReplicas']
allReplicaInfo = res['Value']['AllReplicaInfo']
if seReplicas:
gLogger.info( "StageRequest.submitStageRequests: Completing partially Staged Tasks" )
for storageElement, seReplicaIDs in seReplicas.items():
gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
# Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
res = self._getOnlineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = res['Value']['SEReplicas']
allReplicaInfo = res['Value']['AllReplicaInfo']
# Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
res = self._getOfflineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
# Merge info from both results
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
if storageElement not in seReplicas:
seReplicas[storageElement] = seReplicaIDs
else:
for replicaID in seReplicaIDs:
if replicaID not in seReplicas[storageElement]:
seReplicas[storageElement].append( replicaID )
allReplicaInfo.update( res['Value']['AllReplicaInfo'] )
gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas for staging." % len( allReplicaInfo ) )
for storageElement, seReplicaIDs in seReplicas.items():
gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
return S_OK()
def _getMissingReplicas( self ):
""" This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
All SEs are considered, even if their Cache is full
"""
# Get Replicas that are in Staged/StageSubmitted
gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )
res = self.__getStagedReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
replicasToStage = []
for _storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
# Consider all SEs
replicasToStage.extend( seReplicaIDs )
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def _getOnlineReplicas( self ):
""" This manages the transition
* Waiting -> Offline (if the file is not found Cached)
and returns the list of Cached Replicas for which the pin time has to be extended
SEs for which the cache is currently full are not considered
"""
# Get all Replicas in Waiting Status associated to Staging Tasks
gLogger.verbose( 'StageRequest._getOnlineReplicas: Checking Online Replicas to be handled' )
res = self.__getWaitingReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
if not len( allReplicaInfo ):
gLogger.info( "StageRequest._getOnlineReplicas: There were no Waiting replicas found" )
return res
gLogger.info( "StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
replicasToStage = []
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
if not self.__usage( storageElement ) < self.__cache( storageElement ):
gLogger.info( 'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
# Do not consider those SE that have the Cache full
continue
# Check if the Replica Metadata is OK and find out if they are Online or Offline
res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
if not res['OK']:
gLogger.error( 'StageRequest._getOnlineReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
else:
# keep only Online Replicas
seReplicas[storageElement] = res['Value']['Online']
replicasToStage.extend( res['Value']['Online'] )
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def _getOfflineReplicas( self ):
""" This checks Replicas in Offline status
and returns the list of Replicas to be Staged
SEs for which the cache is currently full are not considered
"""
# Get all Replicas in Waiting Status associated to Staging Tasks
gLogger.verbose( 'StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled' )
res = self.__getOfflineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
if not len( allReplicaInfo ):
gLogger.info( "StageRequest._getOfflineReplicas: There were no Offline replicas found" )
return res
gLogger.info( "StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging." % len( allReplicaInfo ) )
replicasToStage = []
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
if not self.__usage( storageElement ) < self.__cache( storageElement ):
gLogger.info( 'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
# Do not consider those SE that have the Cache full
continue
seReplicas[storageElement] = []
for replicaID in sorted( seReplicaIDs ):
seReplicas[storageElement].append( replicaID )
replicasToStage.append( replicaID )
self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
if not self.__usage( storageElement ) < self.__cache( storageElement ):
# Stop adding Replicas when the cache is full
break
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def __usage( self, storageElement ):
""" Retrieve current usage of SE
"""
if not storageElement in self.storageElementUsage:
self.storageElementUsage[storageElement] = {'TotalSize': 0.}
return self.storageElementUsage[storageElement]['TotalSize']
def __cache( self, storageElement ):
""" Retrieve cache size for SE
"""
if not storageElement in self.storageElementCache:
diskCache = self.resources.getStorageElementValue( storageElement, 'DiskCacheTB', 1. )
self.storageElementCache[storageElement] = diskCache * 1000. / THROTTLING_STEPS
return self.storageElementCache[storageElement]
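  # Example (illustrative, assuming THROTTLING_STEPS == 10): an SE configured
  # with DiskCacheTB = 10. yields a per-step cache of 10. * 1000. / 10,
  # i.e. 1000 GB.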
def __add( self, storageElement, size ):
""" Add size (in bytes) to current usage of storageElement (in GB)
"""
if not storageElement in self.storageElementUsage:
self.storageElementUsage[storageElement] = {'TotalSize': 0.}
size = size / ( 1000 * 1000 * 1000.0 )
self.storageElementUsage[storageElement]['TotalSize'] += size
return size
def _issuePrestageRequests( self, storageElement, seReplicaIDs, allReplicaInfo ):
""" Make the request to the SE and update the DB
"""
pfnRepIDs = {}
for replicaID in seReplicaIDs:
pfn = allReplicaInfo[replicaID]['PFN']
pfnRepIDs[pfn] = replicaID
# Now issue the prestage requests for the remaining replicas
stageRequestMetadata = {}
updatedPfnIDs = []
if pfnRepIDs:
gLogger.info( "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
res = StorageElement( storageElement ).prestageFile( pfnRepIDs, lifetime = self.pinLifetime )
gLogger.debug( "StageRequest._issuePrestageRequests: StorageElement.prestageStorageFile: res=", res )
#Daniela: fishy result from ReplicaManager!!! Should NOT return OK
#res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
#res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.", res['Message'] )
else:
for pfn, requestID in res['Value']['Successful'].items():
if not stageRequestMetadata.has_key( requestID ):
stageRequestMetadata[requestID] = []
stageRequestMetadata[requestID].append( pfnRepIDs[pfn] )
updatedPfnIDs.append( pfnRepIDs[pfn] )
if stageRequestMetadata:
gLogger.info( "StageRequest._issuePrestageRequests: %s stage request metadata to be updated." % len( stageRequestMetadata ) )
res = self.stagerClient.insertStageRequest( stageRequestMetadata, self.pinLifetime )
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert stage request metadata.", res['Message'] )
return res
res = self.stagerClient.updateReplicaStatus( updatedPfnIDs, 'StageSubmitted' )
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert replica status.", res['Message'] )
return
def __sortBySE( self, replicaDict ):
seReplicas = {}
replicaIDs = {}
for replicaID, info in replicaDict.items():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement}
if not seReplicas.has_key( storageElement ):
seReplicas[storageElement] = []
seReplicas[storageElement].append( replicaID )
return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':replicaIDs} )
def __getStagedReplicas( self ):
""" This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Staged replicas from the Replicas table
    res = self.stagerClient.getStagedReplicas()
    if not res['OK']:
      gLogger.error( "StageRequest.__getStagedReplicas: Failed to get replicas with Staged status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getStagedReplicas: No Staged replicas found to process." )
    else:
      gLogger.debug( "StageRequest.__getStagedReplicas: Obtained %s Staged replica(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __getWaitingReplicas( self ):
""" This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getWaitingReplicas()
if not res['OK']:
gLogger.error( "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getWaitingReplicas: No Waiting replicas found to process." )
else:
gLogger.debug( "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __getOfflineReplicas( self ):
""" This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Offline replicas from the Replicas table
    res = self.stagerClient.getOfflineReplicas()
    if not res['OK']:
      gLogger.error( "StageRequest.__getOfflineReplicas: Failed to get replicas with Offline status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getOfflineReplicas: No Offline replicas found to process." )
    else:
      gLogger.debug( "StageRequest.__getOfflineReplicas: Obtained %s Offline replica(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __addAssociatedReplicas( self, replicasToStage, seReplicas, allReplicaInfo ):
""" Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = self.stagerClient.getAssociatedReplicas( replicasToStage )
if not res['OK']:
gLogger.fatal( "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.", res['Message'] )
return res
addReplicas = {'Offline': {}, 'Waiting': {}}
replicaIDs = {}
for replicaID, info in res['Value'].items():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
status = info['Status']
if status not in ['Waiting', 'Offline']:
continue
if not addReplicas[status].has_key( storageElement ):
addReplicas[status][storageElement] = []
replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement }
addReplicas[status][storageElement].append( replicaID )
waitingReplicas = addReplicas['Waiting']
offlineReplicas = addReplicas['Offline']
newReplicaInfo = replicaIDs
allReplicaInfo.update( newReplicaInfo )
# First handle Waiting Replicas for which metadata is to be checked
for storageElement, seReplicaIDs in waitingReplicas.items():
for replicaID in list( seReplicaIDs ):
if replicaID in replicasToStage:
seReplicaIDs.remove( replicaID )
res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
if not res['OK']:
gLogger.error( 'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
else:
# keep all Replicas (Online and Offline)
if not storageElement in seReplicas:
seReplicas[storageElement] = []
seReplicas[storageElement].extend( res['Value']['Online'] )
replicasToStage.extend( res['Value']['Online'] )
seReplicas[storageElement].extend( res['Value']['Offline'] )
replicasToStage.extend( res['Value']['Offline'] )
# Then handle Offline Replicas for which metadata is already checked
for storageElement, seReplicaIDs in offlineReplicas.items():
if not storageElement in seReplicas:
seReplicas[storageElement] = []
for replicaID in sorted( seReplicaIDs ):
if replicaID in replicasToStage:
seReplicaIDs.remove( replicaID )
seReplicas[storageElement].extend( seReplicaIDs )
replicasToStage.extend( seReplicaIDs )
for replicaID in allReplicaInfo.keys():
if replicaID not in replicasToStage:
del allReplicaInfo[replicaID]
totalSize = 0
for storageElement in sorted( seReplicas.keys() ):
replicaIDs = seReplicas[storageElement]
size = 0
for replicaID in replicaIDs:
size += self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
gLogger.info( 'StageRequest.__addAssociatedReplicas: Considering %s GB to be staged at %s' % ( size, storageElement ) )
totalSize += size
gLogger.info( "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize )
return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':allReplicaInfo} )
def __checkIntegrity( self, storageElement, seReplicaIDs, allReplicaInfo ):
""" Check the integrity of the files to ensure they are available
Updates status of Offline Replicas for a later pass
        Return the list of Online replicas to be Staged
"""
if not seReplicaIDs:
return S_OK( {'Online': [], 'Offline': []} )
pfnRepIDs = {}
for replicaID in seReplicaIDs:
pfn = allReplicaInfo[replicaID]['PFN']
pfnRepIDs[pfn] = replicaID
gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( pfnRepIDs ), storageElement ) )
res = StorageElement( storageElement ).getFileMetadata( pfnRepIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
return res
terminalReplicaIDs = {}
onlineReplicaIDs = []
offlineReplicaIDs = []
for pfn, metadata in res['Value']['Successful'].items():
if metadata['Size'] != allReplicaInfo[pfnRepIDs[pfn]]['Size']:
gLogger.error( "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN StorageElement size does not match FileCatalog'
pfnRepIDs.pop( pfn )
elif metadata['Lost']:
gLogger.error( "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
pfnRepIDs.pop( pfn )
elif metadata['Unavailable']:
gLogger.error( "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN is declared Unavailable by the StorageElement'
pfnRepIDs.pop( pfn )
else:
if metadata['Cached']:
gLogger.verbose( "StageRequest.__checkIntegrity: Cache hit for file." )
onlineReplicaIDs.append( pfnRepIDs[pfn] )
else:
offlineReplicaIDs.append( pfnRepIDs[pfn] )
for pfn, reason in res['Value']['Failed'].items():
if re.search( 'File does not exist', reason ):
gLogger.error( "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN does not exist in the StorageElement'
pfnRepIDs.pop( pfn )
# Update the states of the replicas in the database #TODO Sent status to integrity DB
if terminalReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
if onlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Online." % len( onlineReplicaIDs ) )
if offlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Offline." % len( offlineReplicaIDs ) )
res = self.stagerClient.updateReplicaStatus( offlineReplicaIDs, 'Offline' )
return S_OK( {'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()
#res = self.dataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'StageRequestAgent' )
#if not res['OK']:
# gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
# return res
#if res['Value']['Successful']:
# gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
#if res['Value']['Failed']:
# gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
#return res
|
avedaee/DIRAC
|
StorageManagementSystem/Agent/StageRequestAgent.py
|
Python
|
gpl-3.0
| 24,204
|
[
"DIRAC"
] |
47b41b10646823cad1cf16c3ce1d34c864db7cef5b95f7ccc966a6a531f287a7
|
from math import sqrt
import numpy as np
from ase.data import covalent_radii
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io import read, write, string2index
from ase.constraints import FixAtoms
from ase.gui.defaults import read_defaults
from ase.quaternions import Quaternion
class Images:
def __init__(self, images=None):
if images is not None:
self.initialize(images)
def initialize(self, images, filenames=None, init_magmom=False):
self.natoms = len(images[0])
self.nimages = len(images)
if hasattr(images[0], 'get_shapes'):
self.shapes = images[0].get_shapes()
self.Q = []
else:
self.shapes = None
if filenames is None:
filenames = [None] * self.nimages
self.filenames = filenames
self.P = np.empty((self.nimages, self.natoms, 3))
self.V = np.empty((self.nimages, self.natoms, 3))
self.E = np.empty(self.nimages)
self.K = np.empty(self.nimages)
self.F = np.empty((self.nimages, self.natoms, 3))
self.M = np.empty((self.nimages, self.natoms))
self.T = np.empty((self.nimages, self.natoms), int)
self.A = np.empty((self.nimages, 3, 3))
self.Z = images[0].get_atomic_numbers()
self.pbc = images[0].get_pbc()
self.covalent_radii = covalent_radii
config = read_defaults()
if config['covalent_radii'] is not None:
for data in config['covalent_radii']:
self.covalent_radii[data[0]] = data[1]
warning = False
for i, atoms in enumerate(images):
natomsi = len(atoms)
if (natomsi != self.natoms or
(atoms.get_atomic_numbers() != self.Z).any()):
raise RuntimeError('Can not handle different images with ' +
'different numbers of atoms or different ' +
'kinds of atoms!')
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
if hasattr(self, 'Q'):
for q in atoms.get_quaternions():
self.Q.append(Quaternion(q))
self.A[i] = atoms.get_cell()
if (atoms.get_pbc() != self.pbc).any():
warning = True
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
if init_magmom:
self.M[i] = atoms.get_initial_magnetic_moments()
else:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = atoms.get_initial_magnetic_moments()
# added support for tags
try:
self.T[i] = atoms.get_tags()
except RuntimeError:
self.T[i] = 0
if warning:
            print('WARNING: Not all images have the same boundary conditions!')
self.selected = np.zeros(self.natoms, bool)
self.selected_ordered = []
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(self.natoms, bool)
self.nselected = 0
self.set_dynamic(constraints = images[0].constraints)
self.repeat = np.ones(3, int)
self.set_radii(config['radii_scale'])
def prepare_new_atoms(self):
"Marks that the next call to append_atoms should clear the images."
self.next_append_clears = True
def append_atoms(self, atoms, filename=None):
"Append an atoms object to the images already stored."
assert len(atoms) == self.natoms
if self.next_append_clears:
i = 0
else:
i = self.nimages
for name in ('P', 'V', 'E', 'K', 'F', 'M', 'A', 'T'):
a = getattr(self, name)
newa = np.empty( (i+1,) + a.shape[1:], a.dtype )
if not self.next_append_clears:
newa[:-1] = a
setattr(self, name, newa)
self.next_append_clears = False
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
self.A[i] = atoms.get_cell()
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = np.nan
try:
self.T[i] = atoms.get_tags()
except AttributeError:
if i == 0:
self.T[i] = 0
else:
self.T[i] = self.T[i-1]
self.nimages = i + 1
self.filenames.append(filename)
self.set_dynamic()
return self.nimages
def set_radii(self, scale):
        if self.shapes is None:
self.r = self.covalent_radii[self.Z] * scale
else:
self.r = np.sqrt(np.sum(self.shapes**2, axis=1)) * scale
def read(self, filenames, index=-1, filetype=None):
images = []
names = []
for filename in filenames:
i = read(filename, index,filetype)
if not isinstance(i, list):
i = [i]
images.extend(i)
names.extend([filename] * len(i))
self.initialize(images, names)
def import_atoms(self, filename, cur_frame):
if filename:
filename = filename[0]
old_a = self.get_atoms(cur_frame)
imp_a = read(filename, -1)
new_a = old_a + imp_a
self.initialize([new_a], [filename])
def repeat_images(self, repeat):
n = self.repeat.prod()
repeat = np.array(repeat)
self.repeat = repeat
N = repeat.prod()
natoms = self.natoms // n
P = np.empty((self.nimages, natoms * N, 3))
V = np.empty((self.nimages, natoms * N, 3))
M = np.empty((self.nimages, natoms * N))
T = np.empty((self.nimages, natoms * N), int)
F = np.empty((self.nimages, natoms * N, 3))
Z = np.empty(natoms * N, int)
r = np.empty(natoms * N)
dynamic = np.empty(natoms * N, bool)
a0 = 0
for i0 in range(repeat[0]):
for i1 in range(repeat[1]):
for i2 in range(repeat[2]):
a1 = a0 + natoms
for i in range(self.nimages):
P[i, a0:a1] = (self.P[i, :natoms] +
np.dot((i0, i1, i2), self.A[i]))
V[:, a0:a1] = self.V[:, :natoms]
F[:, a0:a1] = self.F[:, :natoms]
M[:, a0:a1] = self.M[:, :natoms]
T[:, a0:a1] = self.T[:, :natoms]
Z[a0:a1] = self.Z[:natoms]
r[a0:a1] = self.r[:natoms]
dynamic[a0:a1] = self.dynamic[:natoms]
a0 = a1
self.P = P
self.V = V
self.F = F
self.Z = Z
self.T = T
self.M = M
self.r = r
self.dynamic = dynamic
self.natoms = natoms * N
self.selected = np.zeros(natoms * N, bool)
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(natoms * N, bool)
self.nselected = 0
def center(self):
""" center each image in the existing unit cell, keeping the cell constant. """
c = self.A.sum(axis=1) / 2.0 - self.P.mean(axis=1)
self.P += c[:, np.newaxis, :]
def graph(self, expr):
""" routine to create the data in ag graphs, defined by the string expr. """
import ase.units as units
code = compile(expr + ',', 'atoms.py', 'eval')
n = self.nimages
def d(n1, n2):
return sqrt(((R[n1] - R[n2])**2).sum())
def a(n1, n2, n3):
v1 = R[n1]-R[n2]
v2 = R[n3]-R[n2]
arg = np.vdot(v1,v2)/(sqrt((v1**2).sum()*(v2**2).sum()))
if arg > 1.0: arg = 1.0
if arg < -1.0: arg = -1.0
return 180.0*np.arccos(arg)/np.pi
def dih(n1, n2, n3, n4):
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = R[n2]-R[n1]
b = R[n3]-R[n2]
c = R[n4]-R[n3]
bxa = np.cross(b,a)
bxa /= np.sqrt(np.vdot(bxa,bxa))
cxb = np.cross(c,b)
cxb /= np.sqrt(np.vdot(cxb,cxb))
angle = np.vdot(bxa,cxb)
# check for numerical trouble due to finite precision:
if angle < -1: angle = -1
if angle > 1: angle = 1
angle = np.arccos(angle)
if (np.vdot(bxa,c)) > 0: angle = 2*np.pi-angle
return angle*180.0/np.pi
# get number of mobile atoms for temperature calculation
ndynamic = 0
for dyn in self.dynamic:
if dyn: ndynamic += 1
S = self.selected
D = self.dynamic[:, np.newaxis]
E = self.E
s = 0.0
data = []
for i in range(n):
R = self.P[i]
V = self.V[i]
F = self.F[i]
A = self.A[i]
M = self.M[i]
f = ((F * D)**2).sum(1)**.5
fmax = max(f)
fave = f.mean()
epot = E[i]
ekin = self.K[i]
e = epot + ekin
T = 2.0 * ekin / (3.0 * ndynamic * units.kB)
data = eval(code)
if i == 0:
m = len(data)
xy = np.empty((m, n))
xy[:, i] = data
if i + 1 < n:
s += sqrt(((self.P[i + 1] - R)**2).sum())
return xy
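    # Illustrative example (not from the original source): for a trajectory of
    # n images, graph('d(0,1), e') returns a (2, n) array whose rows hold the
    # 0-1 interatomic distance and the total energy of each frame.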
def set_dynamic(self, constraints = None):
self.dynamic = np.ones(self.natoms, bool)
if constraints is not None:
for con in constraints:
if isinstance(con,FixAtoms):
self.dynamic[con.index] = False
def write(self, filename, rotations='', show_unit_cell=False, bbox=None, **kwargs):
indices = range(self.nimages)
p = filename.rfind('@')
if p != -1:
try:
slice = string2index(filename[p + 1:])
except ValueError:
pass
else:
indices = indices[slice]
filename = filename[:p]
if isinstance(indices, int):
indices = [indices]
images = [self.get_atoms(i) for i in indices]
if len(filename) > 4 and filename[-4:] in ['.eps', '.png', '.pov']:
write(filename, images,
rotation=rotations, show_unit_cell=show_unit_cell,
bbox=bbox, **kwargs)
else:
write(filename, images, **kwargs)
def get_atoms(self, frame):
atoms = Atoms(positions=self.P[frame],
numbers=self.Z,
magmoms=self.M[0],
tags=self.T[frame],
cell=self.A[frame],
pbc=self.pbc)
if not np.isnan(self.V).any():
atoms.set_velocities(self.V[frame])
# check for constrained atoms and add them accordingly:
if not self.dynamic.all():
atoms.set_constraint(FixAtoms(mask=1-self.dynamic))
atoms.set_calculator(SinglePointCalculator(self.E[frame],
self.F[frame],
None, None, atoms))
return atoms
def delete(self, i):
self.nimages -= 1
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[:i] = self.P[:i]
P[i:] = self.P[i + 1:]
self.P = P
V[:i] = self.V[:i]
V[i:] = self.V[i + 1:]
self.V = V
F[:i] = self.F[:i]
F[i:] = self.F[i + 1:]
self.F = F
A[:i] = self.A[:i]
A[i:] = self.A[i + 1:]
self.A = A
E[:i] = self.E[:i]
E[i:] = self.E[i + 1:]
self.E = E
del self.filenames[i]
def aneb(self):
n = self.nimages
assert n % 5 == 0
levels = n // 5
n = self.nimages = 2 * levels + 3
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
E = np.empty(self.nimages)
for L in range(levels):
P[L] = self.P[L * 5]
P[n - L - 1] = self.P[L * 5 + 4]
V[L] = self.V[L * 5]
V[n - L - 1] = self.V[L * 5 + 4]
F[L] = self.F[L * 5]
F[n - L - 1] = self.F[L * 5 + 4]
E[L] = self.E[L * 5]
E[n - L - 1] = self.E[L * 5 + 4]
for i in range(3):
P[levels + i] = self.P[levels * 5 - 4 + i]
V[levels + i] = self.V[levels * 5 - 4 + i]
F[levels + i] = self.F[levels * 5 - 4 + i]
E[levels + i] = self.E[levels * 5 - 4 + i]
self.P = P
self.V = V
self.F = F
self.E = E
def interpolate(self, m):
assert self.nimages == 2
self.nimages = 2 + m
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[0] = self.P[0]
V[0] = self.V[0]
F[0] = self.F[0]
A[0] = self.A[0]
E[0] = self.E[0]
for i in range(1, m + 1):
x = i / (m + 1.0)
y = 1 - x
P[i] = y * self.P[0] + x * self.P[1]
V[i] = y * self.V[0] + x * self.V[1]
F[i] = y * self.F[0] + x * self.F[1]
A[i] = y * self.A[0] + x * self.A[1]
E[i] = y * self.E[0] + x * self.E[1]
P[-1] = self.P[1]
V[-1] = self.V[1]
F[-1] = self.F[1]
A[-1] = self.A[1]
E[-1] = self.E[1]
self.P = P
self.V = V
self.F = F
self.A = A
self.E = E
self.filenames[1:1] = [None] * m
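# Illustrative note (not part of the original module): with exactly two images
# loaded, Images.interpolate(3) inserts three linearly blended frames, giving
# nimages == 5 with positions, cells and energies mixed as
# y * first + x * second, where x = i / (m + 1) and y = 1 - x.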
if __name__ == '__main__':
import os
os.system('python gui.py')
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/gui/images.py
|
Python
|
gpl-2.0
| 15,050
|
[
"ASE"
] |
d912e0b937b6db09f73e1b64573aafcef4905a841b593b42faabe3c0cad129c8
|
from gromacs.fileformats import TOP
import numpy as np
import math
import copy, argparse
def scale_angles(mol, angles):
new_angles = {}
for dh in mol.angles:
atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype()
atypes = [a.replace("_", "").replace("=","") for a in atypes]
for iswitch in range(16):
if (iswitch%2==0 ):
a1=atypes[0]; a2=atypes[1]; a3=atypes[2]
else:
a1=atypes[2]; a2=atypes[1]; a3=atypes[0]
if((iswitch//2)%2==1): a1="X";
if((iswitch//4)%2==1): a2="X";
if((iswitch//8)%2==1): a3="X";
key = "{0}-{1}-{2}-{3}".format(a1, a2, a3, dh.gromacs['func'])
if (key in angles):
for i, at in enumerate(angles[key]):
#new_angles.append(at)
new_angles[key] = at
break
return new_angles.values()
def scale_dihedrals(mol, dihedrals):
new_dihedrals = {}
for dh in mol.dihedrals:
atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype()
atypes = [a.replace("_", "").replace("=","") for a in atypes]
for iswitch in range(32):
if (iswitch%2==0 ):
a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3]
else:
a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0]
if((iswitch//2)%2==1): a1="X";
if((iswitch//4)%2==1): a2="X";
if((iswitch//8)%2==1): a3="X";
if((iswitch//16)%2==1): a4="X";
key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func'])
if (key in dihedrals):
for i, dt in enumerate(dihedrals[key]):
#new_dihedrals.append(dt)
new_dihedrals[key] = dt
break
    print(new_dihedrals)
return new_dihedrals.values()
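# Note on the iswitch loops in scale_angles/scale_dihedrals/scale_impropers
# (explanatory, not part of the original script): each iswitch value encodes
# one direction/wildcard variant of a type key. Bit 0 reverses the atom
# order and the remaining bits replace individual atom types with the
# GROMACS wildcard "X"; e.g. for dihedrals, iswitch = 6 tests the key
# "X-X-a3-a4-<func>". The first variant present in the type dictionary wins.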
def scale_impropers(mol, impropers):
new_impropers = {}
for im in mol.impropers:
atypes = im.atom1.get_atomtype(), im.atom2.get_atomtype(), im.atom3.get_atomtype(), im.atom4.get_atomtype()
atypes = [a.replace("_", "").replace("=","") for a in atypes]
for iswitch in range(32):
if (iswitch%2==0 ):
a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3];
else:
a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0];
if((iswitch/2)%2==1): a1="X";
if((iswitch/4)%2==1): a2="X";
if((iswitch/8)%2==1): a3="X";
if((iswitch/16)%2==1): a4="X";
key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func'])
if (key in impropers):
for i, imt in enumerate(impropers[key]):
new_impropers[key] = imt
break
    print(new_impropers)
return new_impropers.values()
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("output")
args = parser.parse_args()
top = TOP(args.input)
molname = top.molecules[0].name
mol = top.dict_molname_mol[molname]
#
# ATOMTYPES
#
atomtypes = {a.atomtype for a in mol.atoms}
top.atomtypes = [at for at in top.atomtypes if at.atype in atomtypes]
#
# BONDTYPES
#
bondtypes = {tuple(sorted((b.atom1.atomtype, b.atom2.atomtype))) for b in mol.bonds}
bondtypes_dictionary = {tuple(sorted((bt.atype1, bt.atype2))): bt for bt in top.bondtypes}
top.bondtypes = [bondtypes_dictionary[bt] for bt in bondtypes]
#
# Build angle dictionary
#
angletypes = {}
for at in top.angletypes:
name = "{0}-{1}-{2}-{3}".format(at.atype1, at.atype2, at.atype3, at.gromacs['func'])
if not name in angletypes: angletypes[name] = []
angletypes[name].append(at)
#
# Build dihedral dictionary
#
dihedraltypes = {}
for dt in top.dihedraltypes:
name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func'])
if not name in dihedraltypes: dihedraltypes[name] = []
dihedraltypes[name].append(dt)
print("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes)))
#
# Build improper dictionary
#
impropertypes = {}
for it in top.impropertypes:
name = "{0}-{1}-{2}-{3}-{4}".format(it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func'])
if not name in impropertypes: impropertypes[name] = []
impropertypes[name].append(it)
print("Build impropertypes dictionary with {0} entries".format(len(impropertypes)))
top.angletypes = scale_angles(mol, angletypes)
top.dihedraltypes = scale_dihedrals(mol, dihedraltypes)
top.impropertypes = scale_impropers(mol, impropertypes)
top.nonbond_params = []
top.cmaptypes = []
atomtypes = {at.atype for at in top.atomtypes}
pairtypes = [pt for pt in top.pairtypes if (pt.atype1 in atomtypes) and (pt.atype2 in atomtypes)]
top.pairtypes = pairtypes
# Remove non-default moleculetypes
for k in top.dict_molname_mol.keys():
if k in [molname]: continue
del top.dict_molname_mol[k]
top.write(args.output)
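# Example invocation (illustrative; file names are placeholders):
#   python gw-forcefield.py system.top pruned.top
# writes a topology that keeps only the atom, bond, angle, dihedral and
# improper types actually used by the first molecule of system.top.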
|
jandom/GromacsWrapper
|
scripts/gw-forcefield.py
|
Python
|
gpl-3.0
| 5,792
|
[
"Gromacs"
] |
938ac9949e3e68441a70d2ede64f5bb417ddfac63211ff5bc6f28905e9bc590b
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Tests for module :class:`stoqlib.database.runtime`"""
from stoqlib.database.exceptions import InterfaceError
from stoqlib.database.properties import UnicodeCol
from stoqlib.database.runtime import new_store
from stoqlib.domain.base import Domain
from stoqlib.domain.person import Person, Client, ClientView
from stoqlib.domain.test.domaintest import DomainTest
class WillBeCommitted(Domain):
__storm_table__ = 'will_be_committed'
SQL_DROP = """DROP TABLE IF EXISTS will_be_committed;"""
SQL_CREATE = """CREATE TABLE will_be_committed (
id uuid PRIMARY KEY DEFAULT uuid_generate_v1(),
test_var text,
te_id bigint UNIQUE REFERENCES transaction_entry(id) DEFAULT new_te()
);
CREATE RULE update_te AS ON UPDATE TO will_be_committed DO ALSO SELECT update_te(old.te_id);
"""
test_var = UnicodeCol()
def __init__(self, *args, **kwargs):
super(WillBeCommitted, self).__init__(*args, **kwargs)
self.reset()
def __storm_loaded__(self):
super(WillBeCommitted, self).__storm_loaded__()
self.reset()
def reset(self):
self.was_created = False
self.was_updated = False
self.was_deleted = False
self.update_test_var_on_update = False
self.on_update_called_count = 0
def on_create(self):
self.was_created = True
def on_delete(self):
self.was_deleted = True
def on_update(self):
self.was_updated = True
if self.update_test_var_on_update:
if self.on_update_called_count < 2:
self.test_var = "%s+" % self.test_var
self.on_update_called_count += 1
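# Note (explanatory, not part of the original tests): WillBeCommitted records
# which of the on_create/on_update/on_delete hooks the store invoked during
# commit, so the tests below can assert exactly which hook fired for each
# life-cycle event.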
class StoqlibStoreTest(DomainTest):
def setUp(self):
super(StoqlibStoreTest, self).setUp()
self.store.execute(''.join((WillBeCommitted.SQL_DROP,
WillBeCommitted.SQL_CREATE)))
self.store.commit()
def test_get_pending_count(self):
store = new_store()
self.assertEqual(store.get_pending_count(), 0)
obj = WillBeCommitted(store=store)
self.assertEqual(store.get_pending_count(), 1)
# obj was already dirty, no change here
obj.test_var = u'yyy'
self.assertEqual(store.get_pending_count(), 1)
# Changing obj after flush should set it dirty again and thus,
# increase the pending count
store.flush()
obj.test_var = u'zzz'
self.assertEqual(store.get_pending_count(), 2)
store.commit()
self.assertEqual(store.get_pending_count(), 0)
store.close()
def test_get_pending_count_with_savepoint(self):
store = new_store()
self.assertEqual(store.get_pending_count(), 0)
obj = WillBeCommitted(store=store)
self.assertEqual(store.get_pending_count(), 1)
# savepoint should trigger a flush, making the next change set
# obj dirty again
store.savepoint("savepoint_a")
obj.test_var = u'yyy'
self.assertEqual(store.get_pending_count(), 2)
store.savepoint("savepoint_b")
obj.test_var = u'zzz'
self.assertEqual(store.get_pending_count(), 3)
store.savepoint("savepoint_c")
obj.test_var = u'www'
self.assertEqual(store.get_pending_count(), 4)
store.rollback_to_savepoint("savepoint_b")
self.assertEqual(store.get_pending_count(), 2)
store.rollback()
def test_dirty_flag(self):
# Creating an object should set its dirty flag to True
store = new_store()
obj = WillBeCommitted(store=store)
obj_id = obj.id
store.commit()
self.assertTrue(obj.te.dirty)
# Reset the flag to test changing the object
obj.te.dirty = False
store.commit()
store.close()
# Get the same object from a new connection
store = new_store()
obj = store.get(WillBeCommitted, obj_id)
# The flag must be False
self.assertFalse(obj.te.dirty)
# Changing the object and commiting should update the flag
obj.test_var = u'asd'
store.commit()
self.assertTrue(obj.te.dirty)
store.close()
def test_rollback_to_savepoint(self):
obj = WillBeCommitted(store=self.store, test_var=u'XXX')
obj2 = WillBeCommitted(store=self.store, test_var=u'foo')
self.assertEqual(obj.test_var, u'XXX')
self.assertEqual(obj2.test_var, u'foo')
self.store.savepoint('sp_1')
obj.test_var = u'YYY'
obj2.test_var = u'foo1'
self.store.savepoint('sp_2')
obj.test_var = u'ZZZ'
self.store.savepoint('sp_3')
obj.test_var = u'WWW'
self.assertEqual(obj.test_var, u'WWW')
# Test rollback to last savepoint
self.store.rollback_to_savepoint('sp_3')
self.assertEqual(obj.test_var, u'ZZZ')
self.assertEqual(obj2.test_var, u'foo1')
# Test rollback to a previous savepoint
self.store.rollback_to_savepoint('sp_1')
self.assertEqual(obj.test_var, u'XXX')
self.assertEqual(obj2.test_var, u'foo')
# Test rollback to an unknown savepoint
self.assertRaises(ValueError, self.store.rollback_to_savepoint,
name='Not existing savepoint')
def test_close(self):
store = new_store()
self.assertFalse(store.obsolete)
store.close()
self.assertTrue(store.obsolete)
self.assertRaises(InterfaceError, store.close)
self.assertRaises(InterfaceError, store.commit)
self.assertRaises(InterfaceError, store.rollback)
self.assertRaises(InterfaceError, store.fetch, None)
self.assertRaises(InterfaceError, store.savepoint, 'XXX')
self.assertRaises(InterfaceError, store.rollback_to_savepoint, 'XXX')
def test_transaction_commit_hook(self):
# Dummy will only be asserted for creation on the first commit.
# After that it should pass all assert for nothing made.
dummy_obj = WillBeCommitted(store=self.store,
test_var=u'XXX')
obj = WillBeCommitted(store=self.store,
test_var=u'AAA')
# Test obj being created on database
self.store.commit()
self._assert_created(obj)
self._assert_created(dummy_obj)
obj.reset()
dummy_obj.reset()
# Test obj being updated on the same object it was created
obj.test_var = u'BBB'
self.store.commit()
self._assert_updated(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
# Test obj being modified inside on_update
obj.test_var = u'CCC'
obj.update_test_var_on_update = True
self.store.commit()
# The obj will be modified inside on_update 2 times, so
# there'll be a call to on_update 3 times
self._assert_updated(obj, call_count=3)
self._assert_nothing_made(dummy_obj)
obj.reset()
obj = self.store.find(WillBeCommitted, id=obj.id).one()
dummy_obj = self.store.find(WillBeCommitted, id=dummy_obj.id).one()
# Test obj being commited without any modification
self.store.commit()
self._assert_nothing_made(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
# Test obj being commited after modification.
obj.test_var = u'DDD'
self.store.commit()
self._assert_updated(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
obj = WillBeCommitted(store=self.store,
test_var=u'EEE')
self.store.commit()
obj.reset()
# Test obj being deleted without any modification
self.store.remove(obj)
self.store.commit()
self._assert_deleted(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
obj = WillBeCommitted(store=self.store,
test_var=u'EEE')
self.store.commit()
obj.reset()
# Test obj being deleted after modification
obj.test_var = u'FFF'
self.store.remove(obj)
self.store.commit()
self._assert_deleted(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
# Test obj being deleted after creation
obj = WillBeCommitted(store=self.store,
test_var=u'EEE')
self.store.remove(obj)
self.store.commit()
self._assert_deleted(obj)
self._assert_nothing_made(dummy_obj)
obj.reset()
#
# Private
#
def _assert_created(self, obj):
self.assertTrue(obj.was_created)
self.assertFalse(obj.was_updated)
self.assertFalse(obj.was_deleted)
self.assertEqual(obj.on_update_called_count, 0)
def _assert_deleted(self, obj):
self.assertFalse(obj.was_created)
self.assertTrue(obj.was_deleted)
self.assertFalse(obj.was_updated)
self.assertEqual(obj.on_update_called_count, 0)
def _assert_updated(self, obj, call_count=1):
self.assertFalse(obj.was_created)
self.assertFalse(obj.was_deleted)
self.assertTrue(obj.was_updated)
self.assertEqual(obj.on_update_called_count, call_count)
def _assert_nothing_made(self, obj):
self.assertFalse(obj.was_updated)
self.assertFalse(obj.was_deleted)
self.assertFalse(obj.was_created)
self.assertEqual(obj.on_update_called_count, 0)
class TestStoqlibResultSet(DomainTest):
def test_fast_iter_single_table(self):
results = self.store.find(Person).order_by(Person.te_id)
# Make sure there are results so the test makes sense
assert results.count()
for obj, tpl in zip(results, results.fast_iter()):
for prop in ['name', 'id', 'te_id']:
self.assertEqual(getattr(obj, prop), getattr(tpl, prop))
def test_fast_iter_multiple_table(self):
results = self.store.find((Person, Client),
Person.id == Client.person_id)
# Make sure there are results so the test makes sense
assert results.count()
for objs, tpls in zip(results, results.fast_iter()):
            self.assertEqual(objs[0].id, tpls[0].id)
            self.assertEqual(objs[1].id, tpls[1].id)
def test_fast_iter_mixed(self):
results = self.store.find((Person, Client.id),
Person.id == Client.person_id)
# Make sure there are results so the test makes sense
assert results.count()
for objs, tpls in zip(results, results.fast_iter()):
            self.assertEqual(objs[0].id, tpls[0].id)
            self.assertEqual(objs[1], tpls[1])
def test_fast_iter_viewable(self):
results = self.store.find(ClientView).order_by(Client.te_id)
# Make sure there are results so the test makes sense
assert results.count()
for obj, tpl in zip(results, results.fast_iter()):
for prop in ['name', 'status', 'cpf']:
self.assertEqual(getattr(obj, prop), getattr(tpl, prop))
|
tiagocardosos/stoq
|
stoqlib/database/test/test_runtime.py
|
Python
|
gpl-2.0
| 12,115
|
[
"VisIt"
] |
128236ef22fabbee7e068e26ec3050c5e9b0b2aa2903ecdc83326ff3af32a2ab
|
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import math
from jinja2.filters import environmentfilter
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import zip, zip_longest
from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
from ansible.module_utils._text import to_native, to_text
from ansible.utils.display import Display
try:
from jinja2.filters import do_unique
HAS_UNIQUE = True
except ImportError:
HAS_UNIQUE = False
display = Display()
@environmentfilter
def unique(environment, a, case_sensitive=False, attribute=None):
def _do_fail(e):
if case_sensitive or attribute:
raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
"as it does not support the parameters supplied", orig_exc=e)
error = e = None
try:
if HAS_UNIQUE:
c = do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)
if isinstance(a, Hashable):
c = set(c)
else:
c = list(c)
except TypeError as e:
error = e
_do_fail(e)
except Exception as e:
error = e
_do_fail(e)
display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
if not HAS_UNIQUE or error:
# handle Jinja2 specific attributes when using Ansible's version
if case_sensitive or attribute:
raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, "
"you need a newer version of Jinja2 that provides their version of the filter.")
if isinstance(a, Hashable):
c = set(a)
else:
c = []
for x in a:
if x not in c:
c.append(x)
return c
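# Illustrative behaviour (not from the original source):
#   unique(environment, [1, 2, 2, 3])  ->  [1, 2, 3]
# Passing case_sensitive=True or attribute requires Jinja2's own do_unique;
# without it an AnsibleFilterError is raised instead of falling back.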
@environmentfilter
def intersect(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) & set(b)
else:
c = unique(environment, [x for x in a if x in b])
return c
@environmentfilter
def difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) - set(b)
else:
c = unique(environment, [x for x in a if x not in b])
return c
@environmentfilter
def symmetric_difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) ^ set(b)
else:
isect = intersect(environment, a, b)
c = [x for x in union(environment, a, b) if x not in isect]
return c
@environmentfilter
def union(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) | set(b)
else:
c = unique(environment, a + b)
return c
def min(a):
_min = __builtins__.get('min')
return _min(a)
def max(a):
_max = __builtins__.get('max')
return _max(a)
def logarithm(x, base=math.e):
try:
if base == 10:
return math.log10(x)
else:
return math.log(x, base)
except TypeError as e:
raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
try:
return math.pow(x, y)
except TypeError as e:
raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
def inversepower(x, base=2):
try:
if base == 2:
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
except (ValueError, TypeError) as e:
raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
def human_readable(size, isbits=False, unit=None):
''' Return a human readable string '''
try:
return formatters.bytes_to_human(size, isbits, unit)
except TypeError as e:
raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
def human_to_bytes(size, default_unit=None, isbits=False):
''' Return bytes count from a human readable string '''
try:
return formatters.human_to_bytes(size, default_unit, isbits)
except TypeError as e:
raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
def rekey_on_member(data, key, duplicates='error'):
"""
Rekey a dict of dicts on another member
May also create a dict from a list of dicts.
duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
value would be duplicated or to overwrite previous entries if that's the case.
"""
if duplicates not in ('error', 'overwrite'):
raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
new_obj = {}
if isinstance(data, Mapping):
iterate_over = data.values()
elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
iterate_over = data
else:
raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
for item in iterate_over:
if not isinstance(item, Mapping):
raise AnsibleFilterTypeError("List item is not a valid dict")
try:
key_elem = item[key]
except KeyError:
raise AnsibleFilterError("Key {0} was not found".format(key))
except TypeError as e:
raise AnsibleFilterTypeError(to_native(e))
except Exception as e:
raise AnsibleFilterError(to_native(e))
# Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
        # minimum contain {key: key_elem})
if new_obj.get(key_elem, None):
if duplicates == 'error':
raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
elif duplicates == 'overwrite':
new_obj[key_elem] = item
else:
new_obj[key_elem] = item
return new_obj
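# Usage sketch (illustrative, not part of the upstream plugin): turning a list
# of dicts into a dict keyed on one member, mirroring the docstring above.
def _rekey_on_member_example():
    users = [{'name': 'alice', 'uid': 1001}, {'name': 'bob', 'uid': 1002}]
    assert rekey_on_member(users, 'name') == {
        'alice': {'name': 'alice', 'uid': 1001},
        'bob': {'name': 'bob', 'uid': 1002},
    }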
class FilterModule(object):
''' Ansible math jinja2 filters '''
def filters(self):
filters = {
# general math
'min': min,
'max': max,
# exponents and logarithms
'log': logarithm,
'pow': power,
'root': inversepower,
# set theory
'unique': unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
# combinatorial
'product': itertools.product,
'permutations': itertools.permutations,
'combinations': itertools.combinations,
# computer theory
'human_readable': human_readable,
'human_to_bytes': human_to_bytes,
'rekey_on_member': rekey_on_member,
# zip
'zip': zip,
'zip_longest': zip_longest,
}
return filters
|
jtyr/ansible
|
lib/ansible/plugins/filter/mathstuff.py
|
Python
|
gpl-3.0
| 8,473
|
[
"Brian"
] |
892045c6c451842d1feedd2b78063b6a8774027f3dcf256c5423eab7ec804b03
|
#
# Copyright 2016-2017, 2020 Andreas Klemenz (Fraunhofer IWM)
# 2020 Thomas Reichenbach (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import sys
import distutils.version
import numpy as np
import ase
import ase.data
import ase.io
import ase.io.lammpsrun
import ase.calculators.lammpsrun
import matscipy.neighbours
import matscipy.opls
try:
import ase.version
ase_version_str = ase.version.version
except (ImportError, AttributeError):
ase_version_str = ase.__version__
def read_extended_xyz(fileobj):
"""Read extended xyz file with labeled atoms."""
atoms = ase.io.read(fileobj)
opls_struct = matscipy.opls.OPLSStructure(atoms)
opls_struct.arrays = atoms.arrays
types = opls_struct.get_array('type')
opls_struct.types = np.unique(types)
tags = np.zeros(len(opls_struct), dtype=int)
for it, type in enumerate(opls_struct.types):
tags[types == type] = it
opls_struct.set_tags(tags)
return opls_struct
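# Usage sketch: the input file must carry a per-atom string column named
# 'type' (in ASE extended-xyz terms something like
# Properties=species:S:1:pos:R:3:type:S:1); those labels become the unique
# types and the per-atom tags of the returned OPLSStructure.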
def read_block(filename, name):
data = {}
    if isinstance(filename, str):
        fileobj = open(filename, 'r')
    else:
        fileobj = filename
block = False
for line in fileobj.readlines():
line = line.split()
# find data block
if len(line) >= 2:
if line[1] == name:
block = True
# end of data block
        if block and len(line) == 0:
block = False
# read data
if block:
if line[0][0] == '#':
continue
else:
symbol = line[0]
data[symbol] = []
for word in line[1:]:
if word[0] == '#':
break
else:
data[symbol].append(float(word))
if len(data[symbol]) == 1:
data[symbol] = data[symbol][0]
if len(data) == 0:
print('Error: Data block \"%s\" not found in file \"%s\"' % (name, filename))
sys.exit()
fileobj.close()
return data
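# Illustrative input (hypothetical values): read_block(fname, 'Bonds') on a
# file containing
#
#   # Bonds
#   C2-C2 13.35 1.53    # force constant, equilibrium length
#   C2-H1 14.18 1.09
#
# returns {'C2-C2': [13.35, 1.53], 'C2-H1': [14.18, 1.09]}. A block whose rows
# hold a single value yields scalars instead of one-element lists, and parsing
# of a row stops at an inline '#' comment.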
def read_cutoffs(filename):
cutoffs = matscipy.opls.CutoffList(read_block(filename, 'Cutoffs'))
return cutoffs
def read_parameter_file(filename):
one = read_block(filename, 'Element')
bonds = matscipy.opls.BondData(read_block(filename, 'Bonds'))
angles = matscipy.opls.AnglesData(read_block(filename, 'Angles'))
dihedrals = matscipy.opls.DihedralsData(read_block(filename, 'Dihedrals'))
cutoffs = matscipy.opls.CutoffList(read_block(filename, 'Cutoffs'))
return cutoffs, one, bonds, angles, dihedrals
def write_lammps(prefix, atoms):
write_lammps_in(prefix)
write_lammps_atoms(prefix, atoms)
write_lammps_definitions(prefix, atoms)
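# Usage sketch: write_lammps('sim', struct) emits 'sim.in', 'sim.atoms' and
# 'sim.opls' through the three writers below; 'struct' is assumed to be a
# prepared matscipy.opls.OPLSStructure carrying types, bond/angle/dihedral
# lists and atom_data (two Lennard-Jones values plus a charge per type).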
def write_lammps_in(prefix):
if isinstance(prefix, str):
fileobj = open(prefix + '.in', 'w')
fileobj.write("""# LAMMPS relaxation (written by ASE)
units metal
atom_style full
boundary p p p
#boundary p p f
""")
fileobj.write('read_data ' + prefix + '.atoms\n')
fileobj.write('include ' + prefix + '.opls\n')
fileobj.write("""
kspace_style pppm 1e-5
#kspace_modify slab 3.0
neighbor 1.0 bin
neigh_modify delay 0 every 1 check yes
thermo 1000
thermo_style custom step temp press cpu pxx pyy pzz pxy pxz pyz ke pe etotal vol lx ly lz atoms
dump 1 all xyz 1000 dump_relax.xyz
dump_modify 1 sort id
restart 100000 test_relax
min_style fire
minimize 1.0e-14 1.0e-5 100000 100000
""")
fileobj.close()
def write_lammps_atoms(prefix, atoms):
"""Write atoms input for LAMMPS"""
if isinstance(prefix, str):
fileobj = open(prefix + '.atoms', 'w')
# header
fileobj.write(fileobj.name + ' (by write_lammps_atoms)\n\n')
fileobj.write(str(len(atoms)) + ' atoms\n')
fileobj.write(str(len(atoms.types)) + ' atom types\n')
blist = atoms.bond_list
if len(blist):
btypes = atoms.bond_types
fileobj.write(str(len(blist)) + ' bonds\n')
fileobj.write(str(len(btypes)) + ' bond types\n')
alist = atoms.ang_list
if len(alist):
atypes = atoms.ang_types
fileobj.write(str(len(alist)) + ' angles\n')
fileobj.write(str(len(atypes)) + ' angle types\n')
dlist = atoms.dih_list
if len(dlist):
dtypes = atoms.dih_types
fileobj.write(str(len(dlist)) + ' dihedrals\n')
fileobj.write(str(len(dtypes)) + ' dihedral types\n')
# cell
if distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.11.0'):
p = ase.calculators.lammpsrun.Prism(atoms.get_cell())
else:
p = ase.calculators.lammpsrun.prism(atoms.get_cell())
xhi, yhi, zhi, xy, xz, yz = p.get_lammps_prism()
fileobj.write('\n0.0 %f xlo xhi\n' % xhi)
fileobj.write('0.0 %f ylo yhi\n' % yhi)
fileobj.write('0.0 %f zlo zhi\n' % zhi)
# write tilt factors for non-orthogonal cells
if np.abs(xy) > 1e-10 or np.abs(xz) > 1e-10 or np.abs(yz) > 1e-10:
fileobj.write('\n%f %f %f xy xz yz\n' % (xy, xz, yz))
# atoms
fileobj.write('\nAtoms\n\n')
tags = atoms.get_tags()
types = atoms.types
if atoms.has('molid'):
molid = atoms.get_array('molid')
else:
molid = [1] * len(atoms)
if distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.17.0'):
positions_lammps_str = p.vector_to_lammps(atoms.get_positions()).astype(str)
elif distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.13.0'):
positions_lammps_str = p.positions_to_lammps_strs(atoms.get_positions())
else:
positions_lammps_str = map(p.pos_to_lammps_str, atoms.get_positions())
for i, r in enumerate(positions_lammps_str):
q = atoms.atom_data[types[tags[i]]][2]
fileobj.write('%6d %3d %3d %s %s %s %s' % ((i + 1, molid[i],
tags[i] + 1,
q)
+ tuple(r)))
fileobj.write(' # ' + atoms.types[tags[i]] + '\n')
# velocities
velocities = atoms.get_velocities()
if velocities is not None:
fileobj.write('\nVelocities\n\n')
for i, v in enumerate(velocities):
fileobj.write('%6d %g %g %g\n' %
(i + 1, v[0], v[1], v[2]))
# masses
masses = atoms.get_masses()
tags = atoms.get_tags()
fileobj.write('\nMasses\n\n')
for i, type, tag in zip(range(len(atoms.types)), atoms.types, np.unique(tags)):
fileobj.write('%6d %g # %s\n' %
(i + 1,
masses[tags == tag][0],
type))
# bonds
if len(blist):
fileobj.write('\nBonds\n\n')
for ib, bvals in enumerate(blist):
fileobj.write('%8d %6d %6d %6d ' %
(ib + 1, bvals[0] + 1, bvals[1] + 1,
bvals[2] + 1))
try:
fileobj.write('# ' + btypes[bvals[0]])
except:
pass
fileobj.write('\n')
# angles
if len(alist):
fileobj.write('\nAngles\n\n')
for ia, avals in enumerate(alist):
fileobj.write('%8d %6d %6d %6d %6d ' %
(ia + 1, avals[0] + 1,
avals[1] + 1, avals[2] + 1, avals[3] + 1))
try:
fileobj.write('# ' + atypes[avals[0]])
except:
pass
fileobj.write('\n')
# dihedrals
if len(dlist):
fileobj.write('\nDihedrals\n\n')
for i, dvals in enumerate(dlist):
fileobj.write('%8d %6d %6d %6d %6d %6d ' %
(i + 1, dvals[0] + 1,
dvals[1] + 1, dvals[2] + 1,
dvals[3] + 1, dvals[4] + 1))
try:
fileobj.write('# ' + dtypes[dvals[0]])
except:
pass
fileobj.write('\n')
def write_lammps_definitions(prefix, atoms):
"""Write force field definitions for LAMMPS."""
if isinstance(prefix, str):
fileobj = open(prefix + '.opls', 'w')
fileobj.write('# OPLS potential\n')
fileobj.write('# write_lammps ' +
str(time.asctime(
time.localtime(time.time()))))
# bonds
if len(atoms.bond_types):
fileobj.write('\n# bonds\n')
fileobj.write('bond_style harmonic\n')
for ib, btype in enumerate(atoms.bond_types):
fileobj.write('bond_coeff %6d' % (ib + 1))
itype, jtype = btype.split('-')
name, values = atoms.bonds.name_value(itype, jtype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# angles
if len(atoms.ang_types):
fileobj.write('\n# angles\n')
fileobj.write('angle_style harmonic\n')
for ia, atype in enumerate(atoms.ang_types):
fileobj.write('angle_coeff %6d' % (ia + 1))
itype, jtype, ktype = atype.split('-')
name, values = atoms.angles.name_value(itype, jtype, ktype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# dihedrals
if len(atoms.dih_types):
fileobj.write('\n# dihedrals\n')
fileobj.write('dihedral_style opls\n')
for id, dtype in enumerate(atoms.dih_types):
fileobj.write('dihedral_coeff %6d' % (id + 1))
itype, jtype, ktype, ltype = dtype.split('-')
name, values = atoms.dihedrals.name_value(itype, jtype, ktype, ltype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# Lennard Jones settings
fileobj.write('\n# L-J parameters\n')
fileobj.write('pair_style lj/cut/coul/long 10.0 7.4' +
' # consider changing these parameters\n')
fileobj.write('special_bonds lj/coul 0.0 0.0 0.5\n')
for ia, atype in enumerate(atoms.types):
if len(atype) < 2:
atype = atype + ' '
fileobj.write('pair_coeff ' + str(ia + 1) + ' ' + str(ia + 1))
for value in atoms.atom_data[atype][:2]:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + atype + '\n')
fileobj.write('pair_modify shift yes mix geometric\n')
# Charges
fileobj.write('\n# charges\n')
for ia, atype in enumerate(atoms.types):
if len(atype) < 2:
atype = atype + ' '
fileobj.write('set type ' + str(ia + 1))
fileobj.write(' charge ' + str(atoms.atom_data[atype][2]))
fileobj.write(' # ' + atype + '\n')
def read_lammps_data(filename):
"""Read positions, connectivities, etc."""
    if isinstance(filename, str):
        fileobj = open(filename, 'r')
    else:
        fileobj = filename
lines = fileobj.readlines()
lines.pop(0)
def next_entry():
line = lines.pop(0).strip()
if(len(line) > 0):
lines.insert(0, line)
def next_key():
while(len(lines)):
line = lines.pop(0).strip()
if(len(line) > 0):
lines.pop(0)
return line
return None
next_entry()
header = {}
while(True):
line = lines.pop(0).strip()
if len(line):
w = line.split()
if len(w) == 2:
header[w[1]] = int(w[0])
else:
header[w[1] + ' ' + w[2]] = int(w[0])
else:
break
# read box
next_entry()
cell = np.zeros(3)
for i in range(3):
line = lines.pop(0).strip()
cell[i] = float(line.split()[1])
while(not lines.pop(0).startswith('Atoms')):
pass
lines.pop(0)
natoms = header['atoms']
molid = np.ones(natoms, dtype=int)
tags = np.ones(natoms, dtype=int)
charges = np.zeros(natoms, dtype=float)
positions = np.zeros([natoms,3])
types = ['']*header['atom types']
inconsistent = False
for line in lines[:natoms]:
w = line.split()
i = int(w[0])-1
molid[i] = int(w[1])
tags[i] = int(w[2])-1
charges[i] = float(w[3])
positions[i][0] = float(w[4])
positions[i][1] = float(w[5])
positions[i][2] = float(w[6])
# try to read atom type from comment
if len(w) >= 8:
type = ''.join(w[8:])
if types[tags[i]] == type:
pass
elif types[tags[i]] == '':
types[tags[i]] = type
else:
inconsistent = True
if inconsistent:
print('WARNING: Inconsistency between particle descriptions and particle tags found.')
types = []
for type in np.unique(tags):
types.append(str(type))
opls_struct = matscipy.opls.OPLSStructure(str(natoms)+'H', positions=positions, cell=cell)
opls_struct.set_tags(tags)
opls_struct.set_array('molid', molid)
opls_struct.atom_data = {}
opls_struct.types = types
for tag, type in zip(np.unique(tags), types):
opls_struct.atom_data[type] = [0.0, 0.0, charges[tags == tag][0]]
del lines[:natoms]
key = next_key()
velocities = np.zeros([natoms,3])
if key == 'Velocities':
for line in lines[:natoms]:
w = line.split()
i = int(w[0])-1
velocities[i][0] = float(w[1])
velocities[i][1] = float(w[2])
velocities[i][2] = float(w[3])
del lines[:natoms]
key = next_key()
if key == 'Masses':
ntypes = len(opls_struct.atom_data)
masses = np.empty((ntypes))
for line in lines[:ntypes]:
w = line.split()
i = int(w[0])-1
masses[i] = float(w[1])
del lines[:ntypes]
opls_struct.set_masses(masses[tags])
opls_struct.set_velocities(velocities)
# get the elements from the masses
# this ensures that we have the right elements
# even when reading from a lammps dump file
def newtype(element, types):
if len(element) > 1:
# can not extend, we are restricted to
# two characters
return element
count = 0
for type in types:
if type[0] == element:
count += 1
label = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return (element + label[count])
atomic_numbers = np.empty(ntypes, dtype=int)
ams = ase.data.atomic_masses[:]
ams[np.isnan(ams)] = 0
for i, mass in enumerate(masses):
m2 = (ams - mass)**2
atomic_numbers[i] = m2.argmin()
opls_struct.set_atomic_numbers(atomic_numbers[tags])
key = next_key()
if key != 'Bonds':
bond_list = np.empty([0,3], dtype=int)
bond_types = np.empty(0, dtype=str)
else:
nbonds = header['bonds']
bond_list = np.empty([nbonds,3], dtype=int)
bond_types = ['']*header['bond types']
inconsistent = False
for line in lines[:nbonds]:
w = line.split()
i = int(w[0])-1
bond_list[i][0] = int(w[1])-1
bond_list[i][1] = int(w[2])-1
bond_list[i][2] = int(w[3])-1
# try to read bond type info from comment
if len(w) >= 5:
bond_type = ''.join(w[5:])
                # bond_list[i][0] already holds the zero-based type index
                if bond_types[bond_list[i][0]] == bond_type:
                    pass
                elif bond_types[bond_list[i][0]] == '':
                    bond_types[bond_list[i][0]] = bond_type
else:
inconsistent = True
if inconsistent:
print('WARNING: Inconsistency between bond descriptions and bond type numbers found.')
bond_types = ['']*header['bond types']
del lines[:nbonds]
key = next_key()
opls_struct.bond_types = bond_types
opls_struct.bond_list = bond_list
if key != 'Angles':
ang_list = np.empty([0,4], dtype=int)
ang_types = np.empty(0, dtype=str)
else:
nangles = header['angles']
ang_list = np.empty([nangles,4], dtype=int)
ang_types = ['']*header['angle types']
inconsistent = False
for line in lines[:nangles]:
w = line.split()
i = int(w[0])-1
ang_list[i][0] = int(w[1])-1
ang_list[i][1] = int(w[2])-1
ang_list[i][2] = int(w[3])-1
ang_list[i][3] = int(w[4])-1
# try to read angle type info from comment
            if len(w) >= 6:
                ang_type = ''.join(w[6:])
                # ang_list[i][0] already holds the zero-based type index
                if ang_types[ang_list[i][0]] == ang_type:
                    pass
                elif ang_types[ang_list[i][0]] == '':
                    ang_types[ang_list[i][0]] = ang_type
else:
inconsistent = True
if inconsistent:
print('WARNING: Inconsistency between angle descriptions and angle type numbers found.')
ang_types = ['']*header['angle types']
del lines[:nangles]
key = next_key()
opls_struct.ang_types = ang_types
opls_struct.ang_list = ang_list
if key != 'Dihedrals':
dih_list = np.empty([0,5], dtype=int)
        dih_types = np.empty(0, dtype=str)
else:
ndihedrals = header['dihedrals']
dih_list = np.empty([ndihedrals,5], dtype=int)
dih_types = ['']*header['dihedral types']
inconsistent = False
for line in lines[:ndihedrals]:
w = line.split()
i = int(w[0])-1
dih_list[i][0] = int(w[1])-1
dih_list[i][1] = int(w[2])-1
dih_list[i][2] = int(w[3])-1
dih_list[i][3] = int(w[4])-1
dih_list[i][4] = int(w[5])-1
# try to read dihedral type info from comment
if len(w) >= 7:
dih_type = ''.join(w[7:])
                # dih_list[i][0] already holds the zero-based type index
                if dih_types[dih_list[i][0]] == dih_type:
                    pass
                elif dih_types[dih_list[i][0]] == '':
                    dih_types[dih_list[i][0]] = dih_type
else:
inconsistent = True
if inconsistent:
print('WARNING: Inconsistency between dihedral descriptions and dihedral type numbers found.')
dih_types = ['']*header['dihedral types']
del lines[:ndihedrals]
opls_struct.dih_types = dih_types
opls_struct.dih_list = dih_list
return opls_struct
def update_from_lammps_dump(atoms, filename, check=True):
atoms_dump = ase.io.lammpsrun.read_lammps_dump(filename)
if len(atoms_dump) != len(atoms):
raise RuntimeError('Structure in ' + filename +
' has wrong length: %d != %d' %
(len(atoms_dump), len(atoms)))
if check:
for a, b in zip(atoms, atoms_dump):
# check that the atom types match
if not (a.tag + 1 == b.number):
raise RuntimeError('Atoms index %d are of different '
'type (%d != %d)'
% (a.index, a.tag + 1, b.number))
atoms.set_cell(atoms_dump.get_cell())
atoms.set_positions(atoms_dump.get_positions())
if atoms_dump.get_velocities() is not None:
atoms.set_velocities(atoms_dump.get_velocities())
return atoms
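# Round-trip sketch (hypothetical file names):
#
#   struct = read_lammps_data('sim.atoms')       # positions + connectivity
#   ...                                          # run LAMMPS externally
#   struct = update_from_lammps_dump(struct, 'relax.dump')
#
# update_from_lammps_dump() keeps the topology and only refreshes the cell,
# positions and (if present) velocities from the dump file.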
|
libAtoms/matscipy
|
matscipy/io/opls.py
|
Python
|
lgpl-2.1
| 20,447
|
[
"ASE",
"LAMMPS",
"Matscipy"
] |
9b9e437ba5758ecea9b760b1f554ff83e86dd6f0a6815fc163a55a85338a33ac
|
#!/usr/bin/env python
""" This script is used to submit the jobs on the grid.
It uses an executable (first argument), creates
a directory in which it will store all the job ids (<jobName> args),
and submit a configurable amount of jobs.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Interfaces.API.Job import Job
import sys
import os
if len(sys.argv) < 4:
print("Usage %s <scriptName> <jobName> <nbJobs>" % sys.argv[0])
sys.exit(1)
scriptName = sys.argv[1]
jobName = sys.argv[2]
nbJobs = int(sys.argv[3])
if not os.path.exists(jobName):
os.makedirs(jobName)
os.makedirs("%s/Done" % jobName)
os.makedirs("%s/Failed" % jobName)
else:
print("Folder %s exists" % jobName)
sys.exit(1)
f = open("%s/jobIdList.txt" % jobName, "w")
for i in range(nbJobs):
j = Job()
j.setCPUTime(10000)
j.setExecutable(scriptName)
j.addToOutputSandbox.append("myLog.txt")
j.addToOutputSandbox.append("clock.txt")
j.addToOutputSandbox.append("time.txt")
dirac = Dirac()
jobID = dirac.submitJob(j)
realId = jobID.get("JobID")
f.write("%s\n" % realId)
f.close()
|
ic-hep/DIRAC
|
tests/Performance/DFCPerformance/submitJobs.py
|
Python
|
gpl-3.0
| 1,320
|
[
"DIRAC"
] |
0b477494304abb90185d863c12abaa50e18655e0e341fdc12562a35d756c64f0
|
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" %prog [options] module_or_package
Check that a module satisfies a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
from __future__ import print_function
import collections
import contextlib
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import tokenize
import warnings
import six
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
from pylint.reporters.ureports import nodes as report_nodes
MANAGER = astroid.MANAGER
INCLUDE_IDS_HELP = ("Deprecated. It was used to include message\'s "
"id in output. Use --msg-template instead.")
SYMBOLS_HELP = ("Deprecated. It was used to include symbolic ids of "
"messages in output. Use --msg-template instead.")
def _get_new_args(message):
location = (
message.abspath,
message.path,
message.module,
message.obj,
message.line,
message.column,
)
return (
message.msg_id,
message.symbol,
location,
message.msg,
message.confidence,
)
def _get_python_path(filepath):
dirname = os.path.realpath(os.path.expanduser(filepath))
if not os.path.isdir(dirname):
dirname = os.path.dirname(dirname)
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
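# Example: for /home/user/pkg/sub/mod.py where both pkg/ and pkg/sub/ contain
# an __init__.py, the walk above climbs to /home/user, the first ancestor
# without an __init__.py; that directory is what fix_import_path() later
# prepends to sys.path.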
def _merge_stats(stats):
merged = {}
by_msg = collections.Counter()
for stat in stats:
message_stats = stat.pop('by_msg', {})
by_msg.update(message_stats)
for key, item in six.iteritems(stat):
if key not in merged:
merged[key] = item
else:
if isinstance(item, dict):
merged[key].update(item)
else:
merged[key] = merged[key] + item
merged['by_msg'] = by_msg
return merged
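def _merge_stats_example():
    # Illustrative sketch (not part of upstream pylint): scalar counters are
    # summed, nested dicts are update()d and 'by_msg' entries accumulate.
    merged = _merge_stats([
        {'error': 1, 'by_msg': {'E0001': 1}, 'by_module': {'a': {'error': 1}}},
        {'error': 2, 'by_msg': {'E0001': 1}, 'by_module': {'b': {'error': 2}}},
    ])
    assert merged['error'] == 3
    assert merged['by_msg'] == {'E0001': 2}
    assert set(merged['by_module']) == {'a', 'b'}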
@contextlib.contextmanager
def _patch_sysmodules():
# Context manager that permits running pylint, on Windows, with -m switch
# and with --jobs, as in 'python -2 -m pylint .. --jobs'.
# For more details why this is needed,
# see Python issue http://bugs.python.org/issue10845.
mock_main = __name__ != '__main__' # -m switch
if mock_main:
sys.modules['__main__'] = sys.modules[__name__]
try:
yield
finally:
if mock_main:
sys.modules.pop('__main__')
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'fatal',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'astroid-error',
'Used when an unexpected error occurred while building the '
'Astroid representation. This is usually accompanied by a '
'traceback. Please report such errors !'),
'F0010': ('error while code parsing: %s',
'parse-error',
              'Used when an exception occurred while building the Astroid '
'representation which could be handled by astroid.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'raw-checker-failed',
'Used to inform that a built-in module has not been checked '
'using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'bad-inline-option',
'Used when an inline option is either badly formatted or can\'t '
'be used inside modules.'),
'I0011': ('Locally disabling %s (%s)',
'locally-disabled',
'Used when an inline option disables a message or a messages '
'category.'),
'I0012': ('Locally enabling %s (%s)',
'locally-enabled',
'Used when an inline option enables a message or a messages '
'category.'),
'I0013': ('Ignoring entire file',
'file-ignored',
'Used to inform that the file will not be checked'),
'I0020': ('Suppressed %s (from line %d)',
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Pragma "%s" is deprecated, use "%s" instead',
'deprecated-pragma',
'Some inline pylint options have been renamed or reworked, '
'only the most recent form should be used. '
'NOTE:skip-all is only available with pylint >= 0.26',
{'old_names': [('I0014', 'deprecated-disable-all')]}),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
if multiprocessing is not None:
class ChildLinter(multiprocessing.Process):
def run(self):
# pylint: disable=no-member, unbalanced-tuple-unpacking
tasks_queue, results_queue, self._config = self._args
self._config["jobs"] = 1 # Child does not parallelize any further.
self._python3_porting_mode = self._config.pop(
'python3_porting_mode', None)
self._plugins = self._config.pop('plugins', None)
# Run linter for received files/modules.
for file_or_module in iter(tasks_queue.get, 'STOP'):
result = self._run_linter(file_or_module[0])
try:
results_queue.put(result)
except Exception as ex:
print("internal error with sending report for module %s" %
file_or_module, file=sys.stderr)
print(ex, file=sys.stderr)
results_queue.put({})
def _run_linter(self, file_or_module):
linter = PyLinter()
# Register standard checkers.
linter.load_default_plugins()
# Load command line plugins.
if self._plugins:
linter.load_plugin_modules(self._plugins)
linter.load_configuration(**self._config)
linter.set_reporter(reporters.CollectingReporter())
# Enable the Python 3 checker mode. This option is
# passed down from the parent linter up to here, since
# the Python 3 porting flag belongs to the Run class,
# instead of the Linter class.
if self._python3_porting_mode:
linter.python3_porting_mode()
# Run the checks.
linter.check(file_or_module)
msgs = [_get_new_args(m) for m in linter.reporter.messages]
return (file_or_module, linter.file_state.base_name, linter.current_name,
msgs, linter.stats, linter.msg_status)
class PyLinter(config.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
    * handle some basic but necessary stats data (number of classes, methods...)
    IDE plugin developers: you may have to call
    `astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
    to ensure the latest code version is actually checked.
"""
__implements__ = (interfaces.ITokenChecker, )
name = 'master'
priority = 0
level = 0
msgs = MSGS
@staticmethod
def make_options():
return (('ignore',
{'type' : 'csv', 'metavar' : '<file>[,<file>...]',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add files or directories to the blacklist. '
'They should be base names, not paths.'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'level': 1,
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'level': 1,
'help' : 'List of plugins (as comma separated values of '
'python modules names) to load, usually to register '
'additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'string', 'metavar' : '<format>',
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,'
' parseable, colorized, msvs (visual studio) and html. You '
'can also give a reporter class, eg mypackage.mymodule.'
'MyReporterClass.'}),
('files-output',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports', 'level': 1,
'help' : 'Put messages in a separate file for each module / '
'package specified on the command line instead of printing '
'them on stdout. Reports (if any) will be written in a file '
'name "pylint_global.[txt|html]".'}),
('reports',
{'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the '
'messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports', 'level': 1,
'default': '10.0 - ((float(5 * error + warning + refactor + '
'convention) / statement) * 10)',
                  'help' : 'Python expression which should return a note less '
                           'than 10 (10 is the highest note). You have access '
                           'to the variables error, warning, refactor, '
                           'convention and statement, which respectively '
                           'contain the number of messages in each category '
                           'and the total number of statements analyzed. This '
                           'is used by the global evaluation report (RP0004).'}),
('comment', utils.deprecated_option(opt_type='yn')),
('confidence',
{'type' : 'multiple_choice', 'metavar': '<levels>',
'default': '',
'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
'group': 'Messages control',
'help' : 'Only show warnings with the listed confidence levels.'
' Leave empty to show all. Valid levels: %s' % (
', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
('enable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'e',
'group': 'Messages control',
'help' : 'Enable the message, report, category or checker with the '
'given id(s). You can either give multiple identifier '
'separated by comma (,) or put this option multiple time. '
'See also the "--disable" option for examples. '}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by comma (,) or put this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'
'You can also use "--disable=all" to disable everything first '
'and then reenable specific checks. For example, if you want '
'to run only the similarities checker, you can use '
'"--disable=all --enable=similarities". '
'If you want to run only the classes checker, but have no '
'Warning level messages displayed, use'
'"--disable=all --enable=classes --disable=W"'}),
('msg-template',
{'type' : 'string', 'metavar': '<template>',
'group': 'Reports',
'help' : ('Template used to display messages. '
'This is a python new-style format string '
'used to format the message information. '
'See doc for all details')
}),
('include-ids', utils.deprecated_option('i', 'yn', INCLUDE_IDS_HELP)),
('symbols', utils.deprecated_option('s', 'yn', SYMBOLS_HELP)),
('jobs',
{'type' : 'int', 'metavar': '<n-processes>',
'short': 'j',
'default': 1,
'help' : '''Use multiple processes to speed up Pylint.''',
}),
('unsafe-load-any-extension',
{'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
'help': ('Allow loading of arbitrary C extensions. Extensions'
' are imported into the active Python interpreter and'
' may run arbitrary code.')}),
('extension-pkg-whitelist',
{'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
'help': ('A comma-separated list of package or module names'
' from where C extensions may be loaded. Extensions are'
' loading into the active Python interpreter and may run'
' arbitrary code')}
),
('optimize-ast',
{'type': 'yn', 'metavar': '<yn>', 'default': False,
'help': ('Allow optimization of some AST trees. This will '
'activate a peephole AST optimizer, which will '
'apply various small optimizations. For instance, '
'it can be used to obtain the result of joining '
'multiple strings with the addition operator. '
'Joining a lot of strings can lead to a maximum '
'recursion error in Pylint and this flag can prevent '
'that. It has one side effect, the resulting AST '
'will be different than the one from reality.')}
),
)
option_groups = (
        ('Messages control', 'Options controlling analysis messages'),
        ('Reports', 'Options related to output formatting and reporting'),
)
def __init__(self, options=(), reporter=None, option_groups=(),
pylintrc=None):
# some stuff has to be done before ancestors initialization...
#
# messages store / checkers / reporter / astroid manager
self.msgs_store = utils.MessagesStore()
self.reporter = None
self._reporter_name = None
self._reporters = {}
self._checkers = collections.defaultdict(list)
self._pragma_lineno = {}
self._ignore_file = False
# visit variables
self.file_state = utils.FileState()
self.current_name = None
self.current_file = None
self.stats = None
# init options
self._external_opts = options
self.options = options + PyLinter.make_options()
self.option_groups = option_groups + PyLinter.option_groups
self._options_methods = {
'enable': self.enable,
'disable': self.disable}
self._bw_options_methods = {'disable-msg': self.disable,
'enable-msg': self.enable}
full_version = '%%prog %s, \nastroid %s\nPython %s' % (
version, astroid_version, sys.version)
utils.MessagesHandlerMixIn.__init__(self)
utils.ReportsHandlerMixIn.__init__(self)
super(PyLinter, self).__init__(
usage=__doc__,
version=full_version,
config_file=pylintrc or config.PYLINTRC)
checkers.BaseTokenChecker.__init__(self)
# provided reports
self.reports = (('RP0001', 'Messages by category',
report_total_messages_stats),
('RP0002', '% errors / warnings by module',
report_messages_by_module_stats),
('RP0003', 'Messages',
report_messages_stats),
('RP0004', 'Global evaluation',
self.report_evaluation),
)
self.register_checker(self)
self._dynamic_plugins = set()
self._python3_porting_mode = False
self._error_mode = False
self.load_provider_defaults()
if reporter:
self.set_reporter(reporter)
def load_default_plugins(self):
checkers.initialize(self)
reporters.initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
qname = self._reporter_name
module = modutils.load_module_from_name(
modutils.get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
self.set_reporter(reporter_class())
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
def set_option(self, optname, value, action=None, optdict=None):
"""overridden from config.OptionsProviderMixin to handle some
special options
"""
if optname in self._options_methods or \
optname in self._bw_options_methods:
if value:
try:
meth = self._options_methods[optname]
except KeyError:
meth = self._bw_options_methods[optname]
warnings.warn('%s is deprecated, replace it by %s' % (optname,
optname.split('-')[0]),
DeprecationWarning)
value = utils._check_csv(value)
if isinstance(value, (list, tuple)):
for _id in value:
meth(_id, ignore_unknown=True)
else:
meth(value)
return # no need to call set_option, disable/enable methods do it
elif optname == 'output-format':
self._reporter_name = value
# If the reporters are already available, load
# the reporter class.
if self._reporters:
self._load_reporter()
try:
checkers.BaseTokenChecker.set_option(self, optname,
value, action, optdict)
except config.UnsupportedAction:
print('option %s can\'t be read from config file' % \
optname, file=sys.stderr)
def register_reporter(self, reporter_class):
self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
def register_checker(self, checker):
"""register a new checker
checker is an object implementing IRawChecker or / and IAstroidChecker
"""
assert checker.priority <= 0, 'checker priority can\'t be >= 0'
self._checkers[checker.name].append(checker)
for r_id, r_title, r_cb in checker.reports:
self.register_report(r_id, r_title, r_cb, checker)
self.register_options_provider(checker)
if hasattr(checker, 'msgs'):
self.msgs_store.register_messages(checker)
checker.load_defaults()
# Register the checker, but disable all of its messages.
# TODO(cpopa): we should have a better API for this.
if not getattr(checker, 'enabled', True):
self.disable(checker.name)
def disable_noerror_messages(self):
for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category):
if msgcat == 'E':
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for _reporters in six.itervalues(self._reports):
for report_id, _, _ in _reporters:
self.disable_report(report_id)
def error_mode(self):
"""error mode: enable only errors; no reports, no persistent"""
self._error_mode = True
self.disable_noerror_messages()
self.disable('miscellaneous')
if self._python3_porting_mode:
self.disable('all')
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
else:
self.disable('python3')
self.set_option('reports', False)
self.set_option('persistent', False)
def python3_porting_mode(self):
"""Disable all other checkers and enable Python 3 warnings."""
self.disable('all')
self.enable('python3')
if self._error_mode:
# The error mode was activated, using the -E flag.
# So we'll need to enable only the errors from the
# Python 3 porting checker.
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
else:
self.disable(msg_id)
self._python3_porting_mode = True
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
control_pragmas = {'disable', 'enable'}
for (tok_type, content, start, _, _) in tokens:
if tok_type != tokenize.COMMENT:
continue
match = utils.OPTION_RGX.search(content)
if match is None:
continue
if match.group(1).strip() == "disable-all" or \
match.group(1).strip() == 'skip-file':
if match.group(1).strip() == "disable-all":
self.add_message('deprecated-pragma', line=start[0],
args=('disable-all', 'skip-file'))
self.add_message('file-ignored', line=start[0])
self._ignore_file = True
return
try:
opt, value = match.group(1).split('=', 1)
except ValueError:
self.add_message('bad-inline-option', args=match.group(1).strip(),
line=start[0])
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
# found a "(dis|en)able-msg" pragma deprecated suppresssion
self.add_message('deprecated-pragma', line=start[0],
args=(opt, opt.replace('-msg', '')))
for msgid in utils._splitstrip(value):
# Add the line where a control pragma was encountered.
if opt in control_pragmas:
self._pragma_lineno[msgid] = start[0]
try:
if (opt, msgid) == ('disable', 'all'):
self.add_message('deprecated-pragma', line=start[0],
args=('disable=all', 'skip-file'))
self.add_message('file-ignored', line=start[0])
self._ignore_file = True
return
meth(msgid, 'module', start[0])
except utils.UnknownMessage:
self.add_message('bad-option-value', args=msgid, line=start[0])
else:
self.add_message('unrecognized-inline-option', args=opt, line=start[0])
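    # Example pragmas recognised by process_tokens(), as they would appear in
    # a checked source file:
    #   # pylint: disable=missing-docstring,invalid-name
    #   # pylint: disable-msg=W0612    (deprecated spelling, warned about above)
    #   # pylint: skip-file            (the rest of the module is ignored)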
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for _checkers in six.itervalues(self._checkers)
for c in _checkers if c is not self]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
# fatal errors should not trigger enable / disabling a checker
messages = set(msg for msg in checker.msgs
if msg[0] != 'F' and self.is_message_enabled(msg))
if (messages or
any(self.report_is_enabled(r[0]) for r in checker.reports)):
neededcheckers.append(checker)
# Sort checkers by priority
neededcheckers = sorted(neededcheckers,
key=operator.attrgetter('priority'),
reverse=True)
return neededcheckers
def should_analyze_file(self, modname, path): # pylint: disable=unused-argument, no-self-use
"""Returns whether or not a module should be checked.
        This implementation returns True for all Python source files, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:returns: True if the module should be checked.
:rtype: bool
"""
return path.endswith('.py')
def check(self, files_or_modules):
"""main checking entry: check a list of files or modules from their
name.
"""
# initialize msgs_state now that all messages have been registered into
# the store
for msg in self.msgs_store.messages:
if not msg.may_be_emitted():
self._msgs_state[msg.msgid] = False
if not isinstance(files_or_modules, (list, tuple)):
files_or_modules = (files_or_modules,)
if self.config.jobs == 1:
self._do_check(files_or_modules)
else:
with _patch_sysmodules():
self._parallel_check(files_or_modules)
def _get_jobs_config(self):
child_config = {}
filter_options = {'symbols', 'include-ids', 'long-help'}
filter_options.update((opt_name for opt_name, _ in self._external_opts))
for opt_providers in six.itervalues(self._all_options):
for optname, optdict, val in opt_providers.options_and_values():
if optdict.get('deprecated'):
continue
if optname not in filter_options:
child_config[optname] = utils._format_option_value(
optdict, val)
child_config['python3_porting_mode'] = self._python3_porting_mode
child_config['plugins'] = self._dynamic_plugins
return child_config
def _parallel_task(self, files_or_modules):
# Prepare configuration for child linters.
child_config = self._get_jobs_config()
children = []
manager = multiprocessing.Manager()
tasks_queue = manager.Queue()
results_queue = manager.Queue()
for _ in range(self.config.jobs):
child_linter = ChildLinter(args=(tasks_queue, results_queue,
child_config))
child_linter.start()
children.append(child_linter)
# Send files to child linters.
expanded_files = self.expand_files(files_or_modules)
for files_or_module in expanded_files:
path = files_or_module['path']
tasks_queue.put([path])
# collect results from child linters
failed = False
for _ in expanded_files:
try:
result = results_queue.get()
except Exception as ex:
print("internal error while receiving results from child linter",
file=sys.stderr)
print(ex, file=sys.stderr)
failed = True
break
yield result
# Stop child linters and wait for their completion.
for _ in range(self.config.jobs):
tasks_queue.put('STOP')
for child in children:
child.join()
if failed:
print("Error occured, stopping the linter.", file=sys.stderr)
sys.exit(32)
def _parallel_check(self, files_or_modules):
# Reset stats.
self.open()
all_stats = []
module = None
for result in self._parallel_task(files_or_modules):
(
_,
self.file_state.base_name,
module,
messages,
stats,
msg_status
) = result
for msg in messages:
msg = utils.Message(*msg)
self.set_current_module(module)
self.reporter.handle_message(msg)
all_stats.append(stats)
self.msg_status |= msg_status
self.stats = _merge_stats(all_stats)
self.current_name = module
# Insert stats data to local checkers.
for checker in self.get_checkers():
if checker is not self:
checker.stats = self.stats
def _do_check(self, files_or_modules):
walker = utils.PyLintASTWalker(self)
_checkers = self.prepare_checkers()
tokencheckers = [c for c in _checkers
if interfaces.implements(c, interfaces.ITokenChecker)
and c is not self]
rawcheckers = [c for c in _checkers
if interfaces.implements(c, interfaces.IRawChecker)]
# notify global begin
for checker in _checkers:
checker.open()
if interfaces.implements(checker, interfaces.IAstroidChecker):
walker.add_checker(checker)
# build ast and check modules or packages
for descr in self.expand_files(files_or_modules):
modname, filepath = descr['name'], descr['path']
if not descr['isarg'] and not self.should_analyze_file(modname, filepath):
continue
if self.config.files_output:
reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension)
self.reporter.set_output(open(reportfile, 'w'))
self.set_current_module(modname, filepath)
# get the module representation
ast_node = self.get_ast(filepath, modname)
if ast_node is None:
continue
# XXX to be correct we need to keep module_msgs_state for every
# analyzed module (the problem stands with localized messages which
# are only detected in the .close step)
self.file_state = utils.FileState(descr['basename'])
self._ignore_file = False
# fix the current file (if the source file was not available or
# if it's actually a c extension)
self.current_file = ast_node.file # pylint: disable=maybe-no-member
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
# warn about spurious inline messages handling
spurious_messages = self.file_state.iter_spurious_suppression_messages(self.msgs_store)
for msgid, line, args in spurious_messages:
self.add_message(msgid, line, None, args)
# notify global end
self.stats['statement'] = walker.nbstatements
for checker in reversed(_checkers):
checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(modules, self.config.black_list)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats['by_module'][modname][msg_cat] = 0
def get_ast(self, filepath, modname):
"""return a ast(roid) representation for a module"""
try:
return MANAGER.ast_from_file(filepath, modname, source=True)
except astroid.AstroidBuildingException as ex:
if isinstance(ex.args[0], SyntaxError):
ex = ex.args[0]
self.add_message('syntax-error',
line=ex.lineno or 0,
args=ex.msg)
else:
self.add_message('parse-error', args=ex)
except Exception as ex: # pylint: disable=broad-except
import traceback
traceback.print_exc()
self.add_message('astroid-error', args=(ex.__class__, ex))
def check_astroid_module(self, ast_node, walker,
rawcheckers, tokencheckers):
"""Check a module from its astroid representation."""
try:
tokens = utils.tokenize_module(ast_node)
except tokenize.TokenError as ex:
self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
return
if not ast_node.pure_python:
self.add_message('raw-checker-failed', args=ast_node.name)
else:
#assert astroid.file.endswith('.py')
# invoke ITokenChecker interface on self to fetch module/block
# level options
self.process_tokens(tokens)
if self._ignore_file:
return False
# walk ast to collect line numbers
self.file_state.collect_block_lines(self.msgs_store, ast_node)
# run raw and tokens checkers
for checker in rawcheckers:
checker.process_module(ast_node)
for checker in tokencheckers:
checker.process_tokens(tokens)
# generate events to astroid checkers
walker.walk(ast_node)
return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {'by_module' : {},
'by_msg' : {},
}
MANAGER.optimize_ast = self.config.optimize_ast
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.extension_package_whitelist.update(
self.config.extension_pkg_whitelist)
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats[msg_cat] = 0
def generate_reports(self):
"""close the whole package /module, it's time to make reports !
if persistent run, pickle results for later comparison
"""
# Display whatever messages are left on the reporter.
self.reporter.display_messages(report_nodes.Section())
if self.file_state.base_name is not None:
# load previous results if any
previous_stats = config.load_results(self.file_state.base_name)
# XXX code below needs refactoring to be more reporter agnostic
self.reporter.on_close(self.stats, previous_stats)
if self.config.reports:
sect = self.make_reports(self.stats, previous_stats)
if self.config.files_output:
filename = 'pylint_global.' + self.reporter.extension
self.reporter.set_output(open(filename, 'w'))
else:
sect = report_nodes.Section()
if self.config.reports:
self.reporter.display_reports(sect)
# save results if persistent run
if self.config.persistent:
config.save_results(self.stats, self.file_state.base_name)
else:
self.reporter.on_close(self.stats, {})
# specific reports ########################################################
def report_evaluation(self, sect, stats, previous_stats):
"""make the global evaluation report"""
        # only rate if at least 1 statement was analysed (the count is usually
        # 0 when a syntax error prevented pylint from further processing)
if stats['statement'] == 0:
raise utils.EmptyReport()
# get a global note for the code
evaluation = self.config.evaluation
try:
note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used
except Exception as ex: # pylint: disable=broad-except
msg = 'An exception occurred while rating: %s' % ex
else:
stats['global_note'] = note
msg = 'Your code has been rated at %.2f/10' % note
pnote = previous_stats.get('global_note')
if pnote is not None:
msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
sect.append(report_nodes.Text(msg))
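    # Worked example of the default evaluation expression: with 1 error,
    # 2 warnings and 50 statements,
    #   10.0 - ((float(5*1 + 2 + 0 + 0) / 50) * 10) = 8.6
    # so an error weighs five times as much as a warning per statement.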
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
"""make total errors / warnings report"""
lines = ['type', 'number', 'previous', 'difference']
lines += checkers.table_lines_from_stats(stats, previous_stats,
('convention', 'refactor',
'warning', 'error'))
sect.append(report_nodes.Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
"""make messages type report"""
if not stats['by_msg']:
        # don't print this report when we didn't detect any errors
raise utils.EmptyReport()
in_order = sorted([(value, msg_id)
for msg_id, value in six.iteritems(stats['by_msg'])
if not msg_id.startswith('I')])
in_order.reverse()
lines = ('message id', 'occurrences')
for value, msg_id in in_order:
lines += (msg_id, str(value))
sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
"""make errors / warnings by modules report"""
if len(stats['by_module']) == 1:
# don't print this report when we are analysing a single module
raise utils.EmptyReport()
by_mod = collections.defaultdict(dict)
for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
total = stats[m_type]
for module in six.iterkeys(stats['by_module']):
mod_total = stats['by_module'][module][m_type]
if total == 0:
percent = 0
else:
percent = float((mod_total)*100) / total
by_mod[module][m_type] = percent
sorted_result = []
for module, mod_info in six.iteritems(by_mod):
sorted_result.append((mod_info['error'],
mod_info['warning'],
mod_info['refactor'],
mod_info['convention'],
module))
sorted_result.sort()
sorted_result.reverse()
lines = ['module', 'error', 'warning', 'refactor', 'convention']
for line in sorted_result:
# Don't report clean modules.
if all(entry == 0 for entry in line[:-1]):
continue
lines.append(line[-1])
for val in line[:-1]:
lines.append('%.2f' % val)
if len(lines) == 5:
raise utils.EmptyReport()
sect.append(report_nodes.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith('--'):
try:
option, val = arg[2:].split('=', 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith('-'):
msg = 'Option %s expects a value' % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
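def _preprocess_options_example():
    # Illustrative sketch (not part of upstream pylint): '--rcfile' is consumed
    # before regular option parsing and its callback receives the value from
    # either '--opt=val' or the following argument.
    seen = {}
    args = ['--rcfile=/tmp/rc', 'mypackage']  # hypothetical arguments
    preprocess_options(args,
                       {'rcfile': (lambda opt, val: seen.update({opt: val}),
                                   True)})
    assert args == ['mypackage'] and seen == {'rcfile': '/tmp/rc'}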
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
    `sys.path` is reset to its original value upon exiting this context.
"""
orig = list(sys.path)
changes = []
for arg in args:
path = _get_python_path(arg)
if path in changes:
continue
else:
changes.append(path)
sys.path[:] = changes + sys.path
try:
yield
finally:
sys.path[:] = orig
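def _fix_import_path_example():
    # Illustrative sketch (not part of upstream pylint): inside the context the
    # arguments' package roots are importable; sys.path is restored on exit.
    before = list(sys.path)
    with fix_import_path(['.']):
        pass  # linter.check(...) would normally run here
    assert sys.path == before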
class Run(object):
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
)
def __init__(self, args, reporter=None, exit=True):
self._rcfile = None
self._plugins = []
try:
preprocess_options(args, {
# option: (callback, takearg)
'init-hook': (cb_init_hook, True),
'rcfile': (self.cb_set_rcfile, True),
'load-plugins': (self.cb_add_plugins, True),
})
except ArgumentPreprocessingError as ex:
print(ex, file=sys.stderr)
sys.exit(32)
self.linter = linter = self.LinterClass((
('rcfile',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type': 'string', 'metavar': '<file>',
'help' : 'Specify a configuration file.'}),
('init-hook',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type' : 'string', 'metavar': '<code>',
'level': 1,
'help' : 'Python code to execute, usually for sys.path '
'manipulation such as pygtk.require().'}),
('help-msg',
{'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
'callback' : self.cb_help_message,
'group': 'Commands',
'help' : 'Display a help message for the given message id and '
'exit. The value may be a comma separated list of message ids.'}),
('list-msgs',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_list_messages,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('list-conf-levels',
{'action' : 'callback',
'callback' : cb_list_confidence_levels,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('full-documentation',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_full_documentation,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's full documentation."}),
('generate-rcfile',
{'action' : 'callback', 'callback' : self.cb_generate_config,
'group': 'Commands',
'help' : 'Generate a sample configuration file according to '
'the current configuration. You can put other options '
'before this one to get them in the generated '
'configuration.'}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
'help' : "Generate pylint's man page.", 'hide': True}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_error_mode,
'short': 'E',
'help' : 'In error mode, checkers without error messages are '
'disabled and for others, only the ERROR messages are '
                       'displayed, and no reports are done by default'}),
('py3k',
{'action' : 'callback', 'callback' : self.cb_python3_porting_mode,
'help' : 'In Python 3 porting mode, all checkers will be '
'disabled and only messages emitted by the porting '
'checker will be displayed'}),
('profile', utils.deprecated_option(opt_type='yn')),
), option_groups=self.option_groups, pylintrc=self._rcfile)
# register standard checkers
linter.load_default_plugins()
# load command line plugins
linter.load_plugin_modules(self._plugins)
# add some help section
linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
# pylint: disable=bad-continuation
linter.add_help_section('Output',
'Using the default text output, the message format is : \n'
' \n'
' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
' \n'
'There are 5 kinds of message types:                                             \n'
' * (C) convention, for programming standard violation \n'
' * (R) refactor, for bad code smell \n'
' * (W) warning, for python specific problems \n'
' * (E) error, for probable bugs in the code \n'
' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
'processing.\n'
, level=1)
linter.add_help_section('Output status code',
'Pylint should leave with the following status code:                             \n'
' * 0 if everything went fine \n'
' * 1 if a fatal message was issued \n'
' * 2 if an error message was issued \n'
' * 4 if a warning message was issued \n'
' * 8 if a refactor message was issued \n'
' * 16 if a convention message was issued \n'
' * 32 on usage error \n'
' \n'
'status 1 to 16 will be bit-ORed so you can know which different categories have\n'
'been issued by analysing the pylint output status code\n',
level=1)
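        # Editor's note (hedged): because the categories are bit-ORed, an exit
        # status of 6 means both an error (2) and a warning (4) message were
        # issued (2 | 4 == 6); usage errors exit with 32 on their own.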
# read configuration
linter.disable('suppressed-message')
linter.disable('useless-suppression')
linter.read_config_file()
config_parser = linter.cfgfile_parser
# run init hook, if present, before loading plugins
if config_parser.has_option('MASTER', 'init-hook'):
cb_init_hook('init-hook',
utils._unquote(config_parser.get('MASTER',
'init-hook')))
        # are there any additional plugins in the file configuration?
if config_parser.has_option('MASTER', 'load-plugins'):
plugins = utils._splitstrip(
config_parser.get('MASTER', 'load-plugins'))
linter.load_plugin_modules(plugins)
# now we can load file config and command line, plugins (which can
# provide options) have been registered
linter.load_config_file()
if reporter:
# if a custom reporter is provided as argument, it may be overridden
# by file parameters, so re-set it here, but before command line
# parsing so it's still overrideable by command line option
linter.set_reporter(reporter)
try:
args = linter.load_command_line_configuration(args)
except SystemExit as exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
print(linter.help())
sys.exit(32)
if linter.config.jobs < 0:
print("Jobs number (%d) should be greater than 0"
% linter.config.jobs, file=sys.stderr)
sys.exit(32)
if linter.config.jobs > 1 or linter.config.jobs == 0:
if multiprocessing is None:
print("Multiprocessing library is missing, "
"fallback to single process", file=sys.stderr)
linter.set_option("jobs", 1)
else:
if linter.config.jobs == 0:
linter.config.jobs = multiprocessing.cpu_count()
# insert current working directory to the python path to have a correct
# behaviour
with fix_import_path(args):
linter.check(args)
linter.generate_reports()
if exit:
sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._plugins.extend(utils._splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* disable all but error messages
* disable the 'miscellaneous' checker which can be safely deactivated in
debug
* disable reports
* do not save execution information
"""
self.linter.error_mode()
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=('COMMANDS',))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, optname, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.msgs_store.help_message(utils._splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, optname, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, optname, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.msgs_store.list_messages()
sys.exit(0)
def cb_python3_porting_mode(self, *args, **kwargs):
"""Activate only the python3 porting checker."""
self.linter.python3_porting_mode()
def cb_list_confidence_levels(option, optname, value, parser):
for level in interfaces.CONFIDENCE_LEVELS:
print('%-18s: %s' % level)
sys.exit(0)
def cb_init_hook(optname, value):
"""exec arbitrary code to set sys.path for instance"""
exec(value) # pylint: disable=exec-used
if __name__ == '__main__':
Run(sys.argv[1:])
|
si618/pi-time
|
node_modules/grunt-pylint/tasks/lib/pylint/lint.py
|
Python
|
gpl-3.0
| 57,921
|
[
"VisIt"
] |
0d4b401aaa1e0062755cdf88529d5d16384dbdc143a4a6f8de523ca19c74c5ce
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasnotebase import HasNoteBase
#-------------------------------------------------------------------------
# "Events having notes"
#-------------------------------------------------------------------------
class HasNote(HasNoteBase):
"""Events having notes"""
name = _('Events having <count> notes')
description = _("Matches events having a certain number of notes")
|
prculley/gramps
|
gramps/gen/filters/rules/event/_hasnote.py
|
Python
|
gpl-2.0
| 1,754
|
[
"Brian"
] |
9a773b3860743a371eb7b61e8ebf6f8d636c7ff4ee5b4ec6535beec844e9f576
|
# DNSChef is a highly configurable DNS Proxy for Penetration Testers
# and Malware Analysts. Please visit http://thesprawl.org/projects/dnschef/
# for the latest version and documentation. Please forward all issues and
# concerns to iphelix [at] thesprawl.org.
# Copyright (C) 2015 Peter Kacherginsky, Marcello Salvati
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading, random, operator, time
import SocketServer, socket, sys, os
import binascii
import string
import base64
import logging
from configobj import ConfigObj
from core.configwatcher import ConfigWatcher
from core.utils import shutdown
from core.logger import logger
from dnslib import *
from IPy import IP
formatter = logging.Formatter("%(asctime)s %(clientip)s [DNS] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("DNSChef", formatter)
dnslog = logging.getLogger('dnslog')
handler = logging.FileHandler('./logs/dns/dns.log',)
handler.setFormatter(formatter)
dnslog.addHandler(handler)
dnslog.setLevel(logging.INFO)
# DNSHandler Mixin. The class contains generic functions to parse DNS requests and
# calculate an appropriate response based on user parameters.
class DNSHandler():
def parse(self,data):
nametodns = DNSChef().nametodns
nameservers = DNSChef().nameservers
hsts = DNSChef().hsts
hstsconfig = DNSChef().real_records
server_address = DNSChef().server_address
clientip = {"clientip": self.client_address[0]}
response = ""
try:
# Parse data as DNS
d = DNSRecord.parse(data)
except Exception as e:
log.info("Error: invalid DNS request", extra=clientip)
dnslog.info("Error: invalid DNS request", extra=clientip)
else:
# Only Process DNS Queries
if QR[d.header.qr] == "QUERY":
# Gather query parameters
# NOTE: Do not lowercase qname here, because we want to see
# any case request weirdness in the logs.
qname = str(d.q.qname)
# Chop off the last period
if qname[-1] == '.': qname = qname[:-1]
qtype = QTYPE[d.q.qtype]
# Find all matching fake DNS records for the query name or get False
fake_records = dict()
for record in nametodns:
fake_records[record] = self.findnametodns(qname, nametodns[record])
if hsts:
if qname in hstsconfig:
response = self.hstsbypass(hstsconfig[qname], qname, nameservers, d)
return response
elif qname[:4] == 'wwww':
response = self.hstsbypass(qname[1:], qname, nameservers, d)
return response
elif qname[:3] == 'web':
response = self.hstsbypass(qname[3:], qname, nameservers, d)
return response
# Check if there is a fake record for the current request qtype
if qtype in fake_records and fake_records[qtype]:
fake_record = fake_records[qtype]
# Create a custom response to the query
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
log.info("Cooking the response of type '{}' for {} to {}".format(qtype, qname, fake_record), extra=clientip)
dnslog.info("Cooking the response of type '{}' for {} to {}".format(qtype, qname, fake_record), extra=clientip)
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
ipv6 = IP(fake_record)
ipv6_bin = ipv6.strBin()
ipv6_hex_tuple = [int(ipv6_bin[i:i+8],2) for i in xrange(0,len(ipv6_bin),8)]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](ipv6_hex_tuple)))
elif qtype == "SOA":
mname,rname,t1,t2,t3,t4,t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1,t2,t3,t4,t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](mname,rname,times)))
elif qtype == "NAPTR":
order,preference,flags,service,regexp,replacement = fake_record.split(" ")
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer( RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](order,preference,flags,service,regexp,DNSLabel(replacement))) )
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](priority, weight, port, target) ))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](flags, protocol, algorithm, key) ))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(" ")
covered = getattr(QTYPE,covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp +'GMT',"%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc +'GMT',"%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](covered, algorithm, labels,orig_ttl, sig_exp, sig_inc, key_tag, name, sig)))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
elif qtype == "*" and not None in fake_records.values():
log.info("Cooking the response of type '{}' for {} with {}".format("ANY", qname, "all known fake records."), extra=clientip)
dnslog.info("Cooking the response of type '{}' for {} with {}".format("ANY", qname, "all known fake records."), extra=clientip)
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap,qr=1, aa=1, ra=1), q=d.q)
for qtype,fake_record in fake_records.items():
if fake_record:
                        # NOTE: RDMAP is a dictionary map of qtype strings to handling classes
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
ipv6 = IP(fake_record)
ipv6_bin = ipv6.strBin()
fake_record = [int(ipv6_bin[i:i+8],2) for i in xrange(0,len(ipv6_bin),8)]
elif qtype == "SOA":
mname,rname,t1,t2,t3,t4,t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1,t2,t3,t4,t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](mname,rname,times)))
elif qtype == "NAPTR":
order,preference,flags,service,regexp,replacement = fake_record.split(" ")
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement and replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](order,preference,flags,service,regexp,replacement)))
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](priority, weight, port, target) ))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](flags, protocol, algorithm, key) ))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(" ")
covered = getattr(QTYPE,covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp +'GMT',"%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc +'GMT',"%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](covered, algorithm, labels,orig_ttl, sig_exp, sig_inc, key_tag, name, sig) ))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE,qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
# Proxy the request
else:
log.debug("Proxying the response of type '{}' for {}".format(qtype, qname), extra=clientip)
dnslog.info("Proxying the response of type '{}' for {}".format(qtype, qname), extra=clientip)
nameserver_tuple = random.choice(nameservers).split('#')
response = self.proxyrequest(data, *nameserver_tuple)
return response
    # Find the appropriate IP address to use for a queried name. The function
    # can handle wildcard ('*') components in the configured domain entries.
def findnametodns(self,qname,nametodns):
# Make qname case insensitive
qname = qname.lower()
# Split and reverse qname into components for matching.
qnamelist = qname.split('.')
qnamelist.reverse()
        # HACK: Sort the nametodns dictionary before iterating it so that the
        # global wildcard ['*.*.*.*.*.*.*.*.*.*'] matches last and more
        # specific entries are tried first.
for domain,host in sorted(nametodns.iteritems(), key=operator.itemgetter(1)):
# NOTE: It is assumed that domain name was already lowercased
# when it was loaded through --file, --fakedomains or --truedomains
# don't want to waste time lowercasing domains on every request.
# Split and reverse domain into components for matching
domain = domain.split('.')
domain.reverse()
# Compare domains in reverse.
for a,b in map(None,qnamelist,domain):
if a != b and b != "*":
break
else:
# Could be a real IP or False if we are doing reverse matching with 'truedomains'
return host
else:
return False
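    # Editor's example (hedged, not part of DNSChef): with
    # nametodns = {'*.example.com': '10.0.0.1'}, a query for 'www.example.com'
    # reverses to ['com', 'example', 'www'], which matches the reversed
    # pattern ['com', 'example', '*'], so findnametodns returns '10.0.0.1'.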
# Obtain a response from a real DNS server.
def proxyrequest(self, request, host, port="53", protocol="udp"):
clientip = {'clientip': self.client_address[0]}
reply = None
try:
if DNSChef().ipv6:
if protocol == "udp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
if protocol == "udp":
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3.0)
# Send the proxy request to a randomly chosen DNS server
if protocol == "udp":
sock.sendto(request, (host, int(port)))
reply = sock.recv(1024)
sock.close()
elif protocol == "tcp":
sock.connect((host, int(port)))
# Add length for the TCP request
length = binascii.unhexlify("%04x" % len(request))
sock.sendall(length+request)
# Strip length from the response
reply = sock.recv(1024)
reply = reply[2:]
sock.close()
except Exception as e:
log.warning("Could not proxy request: {}".format(e), extra=clientip)
dnslog.info("Could not proxy request: {}".format(e), extra=clientip)
else:
return reply
def hstsbypass(self, real_domain, fake_domain, nameservers, d):
clientip = {'clientip': self.client_address[0]}
log.info("Resolving '{}' to '{}' for HSTS bypass".format(fake_domain, real_domain), extra=clientip)
dnslog.info("Resolving '{}' to '{}' for HSTS bypass".format(fake_domain, real_domain), extra=clientip)
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
nameserver_tuple = random.choice(nameservers).split('#')
#First proxy the request with the real domain
q = DNSRecord.question(real_domain).pack()
r = self.proxyrequest(q, *nameserver_tuple)
if r is None: return None
#Parse the answer
dns_rr = DNSRecord.parse(r).rr
#Create the DNS response
for res in dns_rr:
if res.get_rname() == real_domain:
res.set_rname(fake_domain)
response.add_answer(res)
else:
response.add_answer(res)
return response.pack()
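    # Editor's note (hedged): the 'wwww' and 'web' prefixes special-cased in
    # parse() are assumed to come from SSLstrip+, which rewrites hostnames so
    # that browsers treat them as fresh, non-HSTS domains; this method maps
    # them back to the real domain while answering under the fake name.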
# UDP DNS Handler for incoming requests
class UDPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
(data,socket) = self.request
response = self.parse(data)
if response:
socket.sendto(response, self.client_address)
# TCP DNS Handler for incoming requests
class TCPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
        # Remove the additional "length" parameter used in the
# TCP DNS protocol
data = data[2:]
response = self.parse(data)
if response:
# Calculate and add the additional "length" parameter
# used in TCP DNS protocol
length = binascii.unhexlify("%04x" % len(response))
self.request.sendall(length+response)
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
# Override SocketServer.UDPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass):
self.address_family = socket.AF_INET6 if DNSChef().ipv6 else socket.AF_INET
SocketServer.UDPServer.__init__(self,server_address,RequestHandlerClass)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# Override default value
allow_reuse_address = True
# Override SocketServer.TCPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass):
self.address_family = socket.AF_INET6 if DNSChef().ipv6 else socket.AF_INET
SocketServer.TCPServer.__init__(self,server_address,RequestHandlerClass)
class DNSChef(ConfigWatcher):
version = "0.4"
tcp = False
ipv6 = False
hsts = False
real_records = {}
nametodns = {}
server_address = "0.0.0.0"
nameservers = ["8.8.8.8"]
port = 53
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
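        # Editor's note (hedged): this is the Borg pattern -- every DNSChef()
        # instance shares one __dict__, which is why DNSHandler.parse() can
        # call DNSChef().nametodns and observe the configuration loaded here.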
def on_config_change(self):
config = self.config['MITMf']['DNS']
self.port = int(config['port'])
# Main storage of domain filters
        # NOTE: RDMAP is a dictionary map of qtype strings to handling classes
for qtype in RDMAP.keys():
self.nametodns[qtype] = dict()
# Adjust defaults for IPv6
if config['ipv6'].lower() == 'on':
self.ipv6 = True
if config['nameservers'] == "8.8.8.8":
self.nameservers = "2001:4860:4860::8888"
# Use alternative DNS servers
if config['nameservers']:
self.nameservers = []
if type(config['nameservers']) is str:
self.nameservers.append(config['nameservers'])
elif type(config['nameservers']) is list:
self.nameservers = config['nameservers']
for section in config.sections:
if section in self.nametodns:
for domain,record in config[section].iteritems():
# Make domain case insensitive
domain = domain.lower()
self.nametodns[section][domain] = record
for k,v in self.config["SSLstrip+"].iteritems():
self.real_records[v] = k
def setHstsBypass(self):
self.hsts = True
def start(self):
self.on_config_change()
self.start_config_watch()
try:
if self.config['MITMf']['DNS']['tcp'].lower() == 'on':
self.startTCP()
else:
self.startUDP()
except socket.error as e:
if "Address already in use" in e:
shutdown("\n[DNS] Unable to start DNS server on port {}: port already in use".format(self.config['MITMf']['DNS']['port']))
# Initialize and start the DNS Server
def startUDP(self):
server = ThreadedUDPServer((self.server_address, int(self.port)), UDPHandler)
# Start a thread with the server -- that thread will then start
# more threads for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
# Initialize and start the DNS Server
def startTCP(self):
server = ThreadedTCPServer((self.server_address, int(self.port)), TCPHandler)
# Start a thread with the server -- that thread will then start
# more threads for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
|
ru-faraon/MITMf
|
core/servers/DNS.py
|
Python
|
gpl-3.0
| 22,886
|
[
"VisIt"
] |
fc91c37175f6d2e2169592fb0703ba6ade739154eabd72ac5c314f77e9391a0c
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors import AffineLinearOperator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalLinearOperator",
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
# TODO(b/35290280): Import in `../../__init__.py` after adding unit-tests.
class MultivariateNormalLinearOperator(
transformed_distribution.TransformedDistribution):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale))
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an `R^3` observation; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalLinearOperator"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(MultivariateNormalLinearOperator, self).__init__(
distribution=normal.Normal(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(MultivariateNormalLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(MultivariateNormalLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
@kullback_leibler.RegisterKL(MultivariateNormalLinearOperator,
MultivariateNormalLinearOperator)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def _kl_brute_force(a, b, name=None):
"""Batched KL divergence `KL(a || b)` for multivariate Normals.
With `X`, `Y` both multivariate Normals in `R^k` with means `mu_a`, `mu_b` and
covariance `C_a`, `C_b` respectively,
```
KL(a || b) = 0.5 * ( L - k + T + Q ),
L := Log[Det(C_b)] - Log[Det(C_a)]
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k**2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
a: Instance of `MultivariateNormalLinearOperator`.
b: Instance of `MultivariateNormalLinearOperator`.
name: (optional) name to use for created ops. Default "kl_mvn".
Returns:
Batchwise `KL(a || b)`.
"""
def squared_frobenius_norm(x):
"""Helper to make KL calculation slightly more readable."""
# http://mathworld.wolfram.com/FrobeniusNorm.html
# The gradient of KL[p,q] is not defined when p==q. The culprit is
# linalg_ops.norm, i.e., we cannot use the commented out code.
# return math_ops.square(linalg_ops.norm(x, ord="fro", axis=[-2, -1]))
return math_ops.reduce_sum(math_ops.square(x), axis=[-2, -1])
# TODO(b/35041439): See also b/35040945. Remove this function once LinOp
# supports something like:
# A.inverse().solve(B).norm(order='fro', axis=[-1, -2])
def is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, linalg.LinearOperatorIdentity) or
isinstance(x, linalg.LinearOperatorScaledIdentity) or
isinstance(x, linalg.LinearOperatorDiag))
with ops.name_scope(name, "kl_mvn", values=[a.loc, b.loc] +
a.scale.graph_parents + b.scale.graph_parents):
# Calculation is based on:
# http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
# and,
# https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
# i.e.,
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ij} (inv(B) A)_{ij}**2
# = ||inv(B) A||_F**2
# where ||.||_F is the Frobenius norm and the second equality follows from
# the cyclic permutation property.
if is_diagonal(a.scale) and is_diagonal(b.scale):
# Using `stddev` because it handles expansion of Identity cases.
b_inv_a = (a.stddev() / b.stddev())[..., array_ops.newaxis]
else:
b_inv_a = b.scale.solve(a.scale.to_dense())
kl_div = (b.scale.log_abs_determinant()
- a.scale.log_abs_determinant()
+ 0.5 * (
- math_ops.cast(a.scale.domain_dimension_tensor(), a.dtype)
+ squared_frobenius_norm(b_inv_a)
+ squared_frobenius_norm(b.scale.solve(
(b.mean() - a.mean())[..., array_ops.newaxis]))))
kl_div.set_shape(array_ops.broadcast_static_shape(
a.batch_shape, b.batch_shape))
return kl_div
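# Editor's sketch (hedged, not part of the TensorFlow source): once registered
# above, the KL is reachable through the public dispatcher, for example:
#   import tensorflow as tf
#   tfd = tf.contrib.distributions
#   a = tfd.MultivariateNormalLinearOperator(
#       loc=[1., 1.], scale=tf.linalg.LinearOperatorDiag([1., 2.]))
#   b = tfd.MultivariateNormalLinearOperator(
#       loc=[0., 0.], scale=tf.linalg.LinearOperatorIdentity(num_rows=2))
#   kl = tfd.kl_divergence(a, b)  # dispatches to _kl_brute_force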
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
|
Python
|
apache-2.0
| 13,425
|
[
"Gaussian"
] |
e989ff467b23cb35ce6cb6e9f515efdac11b59bc056f5cccbd71b8ec883cb439
|
"""Perform streaming post-alignment preparation -- de-duplication and sorting.
Centralizes a pipelined approach to generating sorted, de-duplicated BAM output
from sequencer results.
samblaster: http://arxiv.org/pdf/1403.7486v1.pdf
biobambam bammarkduplicates: http://arxiv.org/abs/1306.0836
"""
import contextlib
import os
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
@contextlib.contextmanager
def tobam_cl(data, out_file, is_paired=False):
"""Prepare command line for producing de-duplicated sorted output.
- If no deduplication, sort and prepare a BAM file.
- If paired, then use samblaster and prepare discordant outputs.
- If unpaired, use biobambam's bammarkduplicates
"""
do_dedup = _check_dedup(data)
umi_consensus = dd.get_umi_consensus(data)
with file_transaction(data, out_file) as tx_out_file:
if not do_dedup:
yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file)
elif umi_consensus:
yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file)
elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)):
sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0]
disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0]
with file_transaction(data, sr_file) as tx_sr_file:
with file_transaction(data, disc_file) as tx_disc_file:
yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file),
tx_out_file)
else:
yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
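# Editor's sketch (hedged, not part of bcbio): callers are assumed to use
# tobam_cl roughly as below, piping aligner SAM output on stdin into the
# yielded command; the variable names are illustrative.
#   with tobam_cl(data, out_file, is_paired=True) as (tobam_cmd, tx_out_file):
#       cmd = "{bwa} mem {ref_file} {fastq1} {fastq2} | " + tobam_cmd
#       do.run(cmd.format(**locals()), "bwa mem alignment")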
def _too_many_contigs(ref_file):
"""Check for more contigs than the maximum samblaster deduplication supports.
"""
max_contigs = 32768
return len(list(ref.file_contigs(ref_file))) >= max_contigs
def _need_sr_disc_reads(data):
"""Check if we need split and discordant reads in downstream processing.
    We use samblaster when needed; otherwise we take a less resource-intensive
    approach that does not extract these reads.
"""
from bcbio import structural
return "lumpy" in structural.get_svcallers(data)
def _get_cores_memory(data, downscale=2):
"""Retrieve cores and memory, using samtools as baseline.
For memory, scaling down because we share with alignment and de-duplication.
"""
resources = config_utils.get_resources("samtools", data["config"])
num_cores = data["config"]["algorithm"].get("num_cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
downscale, "decrease").upper()
return num_cores, max_mem
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False):
"""Convert to sorted BAM output.
Set name_sort to True to sort reads by queryname
"""
samtools = config_utils.get_program("samtools", data["config"])
cores, mem = _get_cores_memory(data, downscale=2)
tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
sort_flag = "-n" if name_sort else ""
return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} "
"-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file):
"""Deduplicate and sort with samblaster, produces split read and discordant pair files.
"""
samblaster = config_utils.get_program("samblaster", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} -o {out_file} -")
# full BAM -- associate more memory and cores
cores, mem = _get_cores_memory(data, downscale=2)
sort_opt = "-n" if data.get("align_split") else ""
dedup_cmd = tobam_cmd.format(out_file=tx_out_file, dext="full", **locals())
# split and discordant BAMs -- give less memory/cores since smaller files
sort_opt = ""
cores, mem = _get_cores_memory(data, downscale=4)
splitter_cmd = tobam_cmd.format(out_file=tx_sr_file, dext="spl", **locals())
discordant_cmd = tobam_cmd.format(out_file=tx_disc_file, dext="disc", **locals())
# samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem
cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) "
"| {dedup_cmd}")
return cmd.format(**locals())
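# Editor's note (hedged): the >(...) constructs above are bash process
# substitutions, so the assembled pipeline must run under bash rather than
# plain sh; bcbio's do.run is assumed to arrange for that.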
def _biobambam_dedup_sort(data, tx_out_file):
"""Perform streaming deduplication and sorting with biobambam's bamsormadup
"""
samtools = config_utils.get_program("samtools", data["config"])
cores, mem = _get_cores_memory(data, downscale=2)
tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
if data.get("align_split"):
cmd = "{samtools} sort -n -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -"
else:
cmd = ("bamsormadup inputformat=sam threads={cores} tmpfile={tmp_file}-markdup "
"SO=coordinate indexfilename={tx_out_file}.bai > {tx_out_file}")
return cmd.format(**locals())
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file):
"""Mark duplicates on aligner output and convert to grouped UMIs by position.
Works with either a separate umi_file or UMI embedded in the read names.
"""
tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1)
cores, mem = _get_cores_memory(data)
cmd = ("bamsormadup tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam "
"level=0 SO=coordinate | ")
# UMIs in a separate file
if os.path.exists(umi_consensus):
cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}"
# UMIs embedded in read name
else:
cmd += "umis bamtag - | samtools view -b > {tx_out_file}"
return cmd.format(**locals())
def _get_fgbio_jvm_opts(data, tmpdir, scale_factor=None):
cores, mem = _get_cores_memory(data)
resources = config_utils.get_resources("fgbio", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
if scale_factor and cores > scale_factor:
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
{"direction": "increase",
"magnitude": cores // scale_factor}}})
jvm_opts += broad.get_default_jvm_opts(tmpdir)
jvm_opts = " ".join(jvm_opts)
return jvm_opts
def umi_consensus(data):
"""Convert UMI grouped reads into fastq pair for re-alignment.
"""
align_bam = dd.get_work_bam(data)
f1_out = "%s-cumi-1.fq.gz" % utils.splitext_plus(align_bam)[0]
f2_out = "%s-cumi-2.fq.gz" % utils.splitext_plus(align_bam)[0]
if not utils.file_uptodate(f1_out, align_bam):
with file_transaction(data, f1_out, f2_out) as (tx_f1_out, tx_f2_out):
jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tx_f1_out), 2)
group_opts, cons_opts = _get_fgbio_options(data)
cmd = ("unset JAVA_HOME && "
"fgbio {jvm_opts} GroupReadsByUmi {group_opts} -s adjacency -i {align_bam} | "
"fgbio {jvm_opts} CallMolecularConsensusReads {cons_opts} "
"-S queryname -i /dev/stdin -o /dev/stdout | "
"bamtofastq F={tx_f1_out} F2={tx_f2_out} gz=1")
do.run(cmd.format(**locals()), "UMI consensus fastq generation")
return f1_out, f2_out
def _get_fgbio_options(data):
"""Get adjustable, through resources, or default options for fgbio.
"""
group_opts = ["--edits", "--min-map-q"]
cons_opts = ["--min-reads"]
defaults = {"--min-reads": "1",
"--min-map-q": "1",
"--edits": "1"}
ropts = config_utils.get_resources("fgbio", data["config"]).get("options", [])
    assert len(ropts) % 2 == 0, "Expected an even number of options for fgbio: %s" % ropts
defaults.update(dict(tz.partition(2, ropts)))
group_out = " ".join(["%s %s" % (x, defaults[x]) for x in group_opts])
cons_out = " ".join(["%s %s" % (x, defaults[x]) for x in cons_opts])
return group_out, cons_out
def _check_dedup(data):
"""Check configuration for de-duplication, handling back compatibility.
"""
dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), True)
if dup_param and isinstance(dup_param, basestring):
logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. "
"Using best-practice choice based on input data.")
dup_param = True
return dup_param
def dedup_bam(in_bam, data):
"""Perform non-stream based deduplication of BAM input files using biobambam.
"""
if _check_dedup(data):
out_file = "%s-dedup%s" % utils.splitext_plus(in_bam)
if not utils.file_exists(out_file):
with tx_tmpdir(data) as tmpdir:
with file_transaction(data, out_file) as tx_out_file:
bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"])
base_tmp = os.path.join(tmpdir, os.path.splitext(os.path.basename(tx_out_file))[0])
cores, mem = _get_cores_memory(data, downscale=2)
cmd = ("{bammarkduplicates} tmpfile={base_tmp}-markdup "
"markthreads={cores} I={in_bam} O={tx_out_file}")
do.run(cmd.format(**locals()), "De-duplication with biobambam")
bam.index(out_file, data["config"])
return out_file
else:
return in_bam
|
brainstorm/bcbio-nextgen
|
bcbio/ngsalign/postalign.py
|
Python
|
mit
| 10,135
|
[
"BWA"
] |
7360707e2cd3a362fe4764b4acd58672555118851fb49c6bfaea13b39d94baf9
|