| text (string, 12–1.05M chars) | repo_name (string, 5–86) | path (string, 4–191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
from contextlib import suppress
from warnings import warn
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import iqr
from ..mapping.evaluation import after_stat
from ..doctools import document
from ..exceptions import PlotnineError, PlotnineWarning
from .stat import stat
# NOTE: Parameter descriptions are in
# statsmodels/nonparametric/kde.py
@document
class stat_density(stat):
"""
Compute density estimate
{usage}
Parameters
----------
{common_parameters}
kernel : str, optional (default: 'gaussian')
Kernel used for density estimation. One of::
'biweight'
'cosine'
'cosine2'
'epanechnikov'
'gaussian'
'triangular'
'triweight'
'uniform'
    adjust : float, optional (default: 1)
        An adjustment factor for the ``bw``. The bandwidth becomes
        :py:`bw * adjust`.
trim : bool, optional (default: False)
This parameter only matters if you are displaying multiple
densities in one plot. If :py:`False`, the default, each
density is computed on the full range of the data. If
:py:`True`, each density is computed over the range of that
group; this typically means the estimated x values will not
line-up, and hence you won't be able to stack density values.
    n : int, optional (default: 1024)
Number of equally spaced points at which the density is to
be estimated. For efficient computation, it should be a power
of two.
gridsize : int, optional (default: None)
If gridsize is :py:`None`, :py:`max(len(x), 50)` is used.
    bw : str or float, optional (default: 'nrd0')
        The bandwidth to use. If a float is given, it is the bandwidth.
The :py:`str` choices are::
'nrd0'
'normal_reference'
'scott'
'silverman'
        ``nrd0`` is a port of ``stats::bw.nrd0`` in R; it is equivalent
        to ``silverman`` when there is more than 1 value in a group.
cut : float, optional (default: 3)
Defines the length of the grid past the lowest and highest
values of ``x`` so that the kernel goes to zero. The end points
are ``-/+ cut*bw*{min(x) or max(x)}``.
clip : tuple, optional (default: (-np.inf, np.inf))
        Values in ``x`` that are outside of the range given by clip are
        dropped, which reduces the number of values used in the estimate.
See Also
--------
plotnine.geoms.geom_density
statsmodels.nonparametric.kde.KDEUnivariate
statsmodels.nonparametric.kde.KDEUnivariate.fit
"""
_aesthetics_doc = """
{aesthetics_table}
.. rubric:: Options for computed aesthetics
::
'density' # density estimate
'count' # density * number of points,
# useful for stacked density plots
'scaled' # density estimate, scaled to maximum of 1
"""
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'density', 'position': 'stack',
'na_rm': False,
'kernel': 'gaussian', 'adjust': 1,
'trim': False, 'n': 1024, 'gridsize': None,
'bw': 'nrd0', 'cut': 3,
'clip': (-np.inf, np.inf)}
DEFAULT_AES = {'y': after_stat('density')}
CREATES = {'density', 'count', 'scaled', 'n'}
def setup_params(self, data):
params = self.params.copy()
lookup = {
'biweight': 'biw',
'cosine': 'cos',
'cosine2': 'cos2',
'epanechnikov': 'epa',
'gaussian': 'gau',
'triangular': 'tri',
'triweight': 'triw',
'uniform': 'uni'}
with suppress(KeyError):
params['kernel'] = lookup[params['kernel'].lower()]
if params['kernel'] not in lookup.values():
msg = ("kernel should be one of {}. "
"You may use the abbreviations {}")
            raise PlotnineError(msg.format(list(lookup.keys()),
                                           list(lookup.values())))
return params
@classmethod
def compute_group(cls, data, scales, **params):
weight = data.get('weight')
if params['trim']:
range_x = data['x'].min(), data['x'].max()
else:
range_x = scales.x.dimension()
return compute_density(data['x'], weight, range_x, **params)
def compute_density(x, weight, range, **params):
x = np.asarray(x, dtype=float)
not_nan = ~np.isnan(x)
x = x[not_nan]
bw = params['bw']
kernel = params['kernel']
n = len(x)
if n == 0 or (n == 1 and isinstance(bw, str)):
if n == 1:
warn("To compute the density of a group with only one "
"value set the bandwidth manually. e.g `bw=0.1`",
PlotnineWarning)
warn("Groups with fewer than 2 data points have been removed.",
PlotnineWarning)
return pd.DataFrame()
# kde is computed efficiently using fft. But the fft does
# not support weights and is only available with the
# gaussian kernel. When weights are relevant we
# turn off the fft.
if weight is None:
if kernel != 'gau':
weight = np.ones(n) / n
else:
weight = np.asarray(weight, dtype=float)
if kernel == 'gau' and weight is None:
fft = True
else:
fft = False
if bw == 'nrd0':
bw = nrd0(x)
kde = sm.nonparametric.KDEUnivariate(x)
kde.fit(
kernel=kernel,
bw=bw,
fft=fft,
weights=weight,
adjust=params['adjust'],
cut=params['cut'],
gridsize=params['gridsize'],
clip=params['clip']
)
x2 = np.linspace(range[0], range[1], params['n'])
try:
y = kde.evaluate(x2)
if np.isscalar(y) and np.isnan(y):
raise ValueError('kde.evaluate returned nan')
except ValueError:
y = []
for _x in x2:
result = kde.evaluate(_x)
try:
y.append(result[0])
except TypeError:
y.append(result)
y = np.asarray(y)
# Evaluations outside the kernel domain return np.nan,
# these values and corresponding x2s are dropped.
# The kernel domain is defined by the values in x, but
# the evaluated values in x2 could have a much wider range.
not_nan = ~np.isnan(y)
x2 = x2[not_nan]
y = y[not_nan]
return pd.DataFrame({'x': x2,
'density': y,
'scaled': y / np.max(y) if len(y) else [],
'count': y * n,
'n': n})
def nrd0(x):
"""
Port of R stats::bw.nrd0
This is equivalent to statsmodels silverman when x has more than
1 unique value. It can never give a zero bandwidth.
Parameters
----------
x : array_like
Values whose density is to be estimated
Returns
-------
out : float
Bandwidth of x
"""
n = len(x)
    if n < 2:
        raise ValueError(
            "Need at least 2 data points to compute the nrd0 bandwidth."
        )
std = np.std(x, ddof=1)
std_estimate = iqr(x)/1.349
low_std = np.min((std, std_estimate))
if low_std == 0:
low_std = std_estimate or np.abs(np.asarray(x)[0]) or 1
return 0.9 * low_std * (n ** -0.2)
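# Illustrative sketch (not part of the original module): how `nrd0` and
# `compute_density` fit together. The data and parameter values below are
# arbitrary; `compute_density` expects all of these keyword parameters.
#
#   rng = np.random.default_rng(0)
#   x = rng.normal(size=100)
#   df = compute_density(x, None, (x.min(), x.max()),
#                        bw=nrd0(x), kernel='gau', adjust=1, n=512,
#                        gridsize=None, cut=3, clip=(-np.inf, np.inf))
#   # df has columns: x, density, scaled, count, n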
| has2k1/plotnine | plotnine/stats/stat_density.py | Python | gpl-2.0 | 7,516 | ["Gaussian"] | 1b33b17e26688e7b276e2acda78e44d4f4d40a5b98c79e6b94d6348c6dc2187e |
#! /usr/bin/env python
### reciprocal_BLAST.py ################################################################
### By Geoffrey Thomson
### This script performs a nucleotide reciprocal BLAST on two fasta files input by the user.
### Draws heavily from the online tutorial (http://biopython.org/DIST/docs/tutorial/Tutorial.html#sec11)
### and a script by Hong Qin (https://github.com/hongqin/Simple-reciprocal-best-blast-hit-pairs)
from Bio.Blast.Applications import NcbiblastnCommandline
from subprocess import call
import sys, re, os
if len(sys.argv) < 4:
sys.exit('\n Usage: python %s <Input-FASTA-file-1> <Input-FASTA-file-2> <Output-file.txt>\n' % sys.argv[0])
fasta_file_1 = sys.argv[1]
fasta_file_2 = sys.argv[2]
output_file = sys.argv[3]
# Make databases to BLAST against
call(["makeblastdb", "-in", fasta_file_1, "-parse_seqids", "-dbtype", "nucl", "-out", "DB1.db"])
call(["makeblastdb", "-in", fasta_file_2, "-parse_seqids", "-dbtype", "nucl", "-out", "DB2.db"])
# Perform BLAST alignments
blastn_fasta_file_1_vs_DB_2 = NcbiblastnCommandline(query = fasta_file_1, db = "DB2.db", evalue = 0.001, outfmt = 7, out = "DB2.txt")
stdout, stderr = blastn_fasta_file_1_vs_DB_2()
blastn_fasta_file_2_vs_DB_1 = NcbiblastnCommandline(query = fasta_file_2, db = "DB1.db", evalue = 0.001, outfmt = 7, out = "DB1.txt")
stdout, stderr = blastn_fasta_file_2_vs_DB_1()
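# With "-outfmt 7", BLAST writes comment lines starting with '#' plus
# tab-separated hit lines whose first two fields are the query id and the
# subject id. The parsing below uses only those two fields and keeps the
# first (best-ranked) hit for each query.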
# Parse BLAST results, place matches into dictionaries
DB1_results = open("DB2.txt", 'r')
Dict_2 = {}
for Line in DB1_results:
if (Line[0] != '#'):
Line.strip()
Elements = re.split('\t', Line)
queryId = Elements[0]
subjectId = Elements[1]
if (not( queryId in Dict_2.keys())):
Dict_2[queryId] = subjectId
DB1_results = open("DB1.txt", 'r')
Dict_1 = {}
for Line in DB1_results:
if (Line[0] != '#'):
Line.strip()
Elements = re.split('\t', Line)
queryId = Elements[0]
subjectId = Elements[1]
if (not(queryId in Dict_1.keys())):
Dict_1[queryId] = subjectId
# Identify reciprocal best hits and put them in a new dictionary
RBH = {}
for id1, value1 in Dict_1.items():
    #print(id1, value1)
    if value1 in Dict_2 and id1 == Dict_2[value1]:
        RBH[value1] = id1
# Write results to an output file
output = open(output_file, 'w')
header = fasta_file_1 + "\t" + fasta_file_2 + "\n"
output.write(header)
for pair in RBH.keys():
line = pair + '\t' + RBH[pair] + '\n'
output.write(line)
print("\n Identified %s reciprocal best BLAST hists between %s and %s \n" % (len(RBH), fasta_file_1, fasta_file_2))
output.close()
# Clean up created files
filelist = [ f for f in os.listdir(".") if f.startswith(("DB1", "DB2")) ]
for f in filelist:
os.remove(f)
| G-Thomson/GPyS | reciprocal_BLAST.py | Python | mit | 2,799 | ["BLAST", "Biopython"] | 0a2fa4506a64c1bd4669a759cbc07915ae8ea5eccdb73f46a38f8f6d89ae4e66 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-install-agent
# Author : Ricardo Graciani
########################################################################
"""
Do the initial installation and configuration of a DIRAC agent
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Utilities import InstallTools
from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
#
from DIRAC import gConfig, S_OK, S_ERROR
from DIRAC import exit as DIRACexit
InstallTools.exitOnError = True
#
from DIRAC.Core.Base import Script
overwrite = False
def setOverwrite( opVal ):
global overwrite
overwrite = True
return S_OK()
module = ''
specialOptions = {}
def setModule( optVal ):
global specialOptions,module
specialOptions['Module'] = optVal
module = optVal
return S_OK()
def setSpecialOption( optVal ):
global specialOptions
option,value = optVal.split('=')
specialOptions[option] = value
return S_OK()
Script.registerSwitch( "w", "overwrite", "Overwrite the configuration in the global CS", setOverwrite )
Script.registerSwitch( "m:", "module=", "Python module name for the agent code", setModule )
Script.registerSwitch( "p:", "parameter=", "Special agent option ", setSpecialOption )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... System Agent|System/Agent' % Script.scriptName,
'Arguments:',
                                     ' System: Name of the DIRAC system (e.g. WorkloadManagement)',
                                     ' Agent: Name of the DIRAC agent (e.g. JobCleaningAgent)'] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
if len( args ) == 1:
args = args[0].split( '/' )
if len( args ) != 2:
Script.showHelp()
DIRACexit( -1 )
#
system = args[0]
agent = args[1]
if module:
result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, module,
getCSExtensions(),
overwrite = overwrite )
result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent,
getCSExtensions(),
specialOptions=specialOptions,
overwrite = overwrite,
addDefaultOptions = False )
else:
result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent,
getCSExtensions(),
specialOptions=specialOptions,
overwrite = overwrite )
if not result['OK']:
print "ERROR:", result['Message']
else:
result = InstallTools.installComponent( 'agent', system, agent, getCSExtensions(), module )
if not result['OK']:
print "ERROR:", result['Message']
DIRACexit( 1 )
else:
print "Successfully installed agent %s in %s system, now setting it up" % ( agent, system )
result = InstallTools.setupComponent( 'agent', system, agent, getCSExtensions(), module )
if not result['OK']:
print "ERROR:", result['Message']
DIRACexit( 1 )
    result = MonitoringUtilities.monitorInstallation( 'agent', system, agent, module )
if not result['OK']:
print "ERROR:", result['Message']
DIRACexit( 1 )
print "Successfully completed the installation of agent %s in %s system" % ( agent, system )
DIRACexit()
| marcelovilaca/DIRAC | Core/scripts/dirac-install-agent.py | Python | gpl-3.0 | 3,729 | ["DIRAC"] | f95e0ce39b2471fc34f0c5d363b2dedc0e8bae8eacf216b042f2bf915bbc1e3d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
GIMLi Basics
============
This is the first tutorial where we demonstrate the general use of
:term:`GIMLi` in Python, i.e., :term:`pyGIMLi`.
The modelling as well as the inversion part of :term:`GIMLi` often requires a
spatial discretization for the domain of interest, the so called
:gimliapi:`GIMLI::Mesh`.
This tutorial shows some basic aspects of handling a mesh.
First, the library needs to be imported.
To avoid name clashes with other libraries we suggest importing ``pygimli``
and aliasing it to the simple abbreviation ``pg``:
"""
import pygimli as pg
###############################################################################
# Every part of the c++ namespace :gimliapi:`GIMLI` is bound to python and can
# be used with the leading ``pg.``
#
# For instance get the current version for :term:`GIMLi` with:
print(pg.__version__)
###############################################################################
#
# Now that we know the namespace :gimliapi:`GIMLI`, we can create a first mesh.
# A mesh is represented by a collection of nodes, cells and boundaries,
# i.e., geometrical entities.
#
# .. note::
#
# A regularly spaced mesh consisting of rectangles or hexahedrons is
# usually called a grid. However, a grid is just a special variant of a mesh
# so GIMLi treats it the same. The only difference is how they are created.
#
# GIMLi provides a collection of tools for mesh import, export and generation.
# A simple grid generation is built-in but we also provide wrappers for
# unstructured mesh generations, e.g., :term:`Triangle`, :term:`Tetgen` and
# :term:`Gmsh`. To create a 2d grid you need to give two arrays/lists of sample points
# in x and y direction, in that order, or just numbers.
grid = pg.createGrid(x=[-1.0, 0.0, 1.0, 4.0], y=[-1.0, 0.0, 1.0, 4.0])
###############################################################################
# The returned object ``grid`` is an instance of :gimliapi:`GIMLI::Mesh` and
# provides various methods for modification and io-operations. General
# information about the grid can be printed using the simple print() function.
#
print(grid)
###############################################################################
#
# Or you can access them manually using different methods:
#
print('Mesh: Nodes:', grid.nodeCount(),
'Cells:', grid.cellCount(),
'Boundaries:', grid.boundaryCount())
###############################################################################
#
# You can iterate through all cells of the general type :gimliapi:`GIMLI::Cell`
# that also provides a lot of methods. Here we list the number of nodes and the
# node ids per cell:
for cell in grid.cells():
print("Cell", cell.id(), "has", cell.nodeCount(),
"nodes. Node IDs:", [n.id() for n in cell.nodes()])
print(type(grid.cell(0)))
###############################################################################
# To generate the input arrays ``x`` and ``y``, you can use the
# built-in :gimliapi:`GIMLI::Vector` (pre-defined for double values as
# ``pg.Vector``), standard python lists or :term:`numpy` arrays,
# which are widely compatible with :term:`GIMLi` vectors.
import numpy as np
grid = pg.createGrid(x=np.linspace(-1.0, 1.0, 10),
y=1.0 - np.logspace(np.log10(1.0), np.log10(2.0), 10))
###############################################################################
#
# We can see that this new ``grid`` contains
#
print(grid.cellCount())
###############################################################################
# rectangles of type :gimliapi:`GIMLI::Quadrangle` derived from the
# base type :gimliapi:`GIMLI::Cell`, edges of type :gimliapi:`GIMLI::Edge`,
# which are boundaries of the general type :gimliapi:`GIMLI::Boundary`.
#
print(grid.boundaryCount())
###############################################################################
# The mesh can be saved and loaded in our binary mesh format ``.bms``.
# Or exported into ``.vtk`` format for 2D or 3D visualization using
# :term:`Paraview`.
#
# However, we recommend visualizing 2-dimensional content using python scripts
# that provide better exports to graphics files (e.g., png, pdf, svg).
# In :term:`pygimli` we provide some basic post-processing routines using
# the :term:`matplotlib` visualization framework. The main visualization call
# is :py:mod:`pygimli.viewer.show` which is sufficient for most meshes,
# fields, models and streamline views.
pg.viewer.show(grid)
pg.wait()
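###############################################################################
# As a quick sketch of the save/export step mentioned above (the method
# names are assumed from the :gimliapi:`GIMLI::Mesh` API and may differ
# between versions):
#
# .. code-block:: python
#
#     grid.save('grid.bms')       # binary mesh format
#     grid.exportVTK('grid.vtk')  # for 2D/3D visualization, e.g. in Paraview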
###############################################################################
# For more control you can also use the appropriate draw methods
# :py:mod:`pygimli.viewer.mpl.drawMesh`.
| gimli-org/gimli | doc/tutorials/1_basics/plot_1-gimli_basics.py | Python | apache-2.0 | 4,741 | ["ParaView", "VTK"] | d6a884c69a0555edbc9e39dabc16d1107e63b258b9b5f7381af53636c47dd5c3 |
"""
Copyright (c) 2016 Gianluca Gerard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Portions of the code are
Copyright (c) 2010--2015, Deep Learning Tutorials Development Team
All rights reserved.
"""
from __future__ import print_function, division
import timeit
import os
import matplotlib.pyplot as plt
import numpy
import theano
import scipy.misc
from theano import tensor
from theano.tensor import nnet
from theano.compile.nanguardmode import NanGuardMode
#from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from MNIST import MNIST
from utils import get_minibatches_idx
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
"""Initial version from http://deeplearning.net/tutorial/code/rbm.py """
def __init__(
self,
name="",
input=None,
n_visible=784,
n_hidden=500,
W=None,
hbias=None,
vbias=None,
p=1.0,
numpy_rng=None,
theano_rng=None
):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
        :param vbias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias

        :param p: probability of retaining a hidden unit in the dropout
        mask applied to the hidden activations
        """
self.name = name
self.n_visible = n_visible
self.n_hidden = n_hidden
self.p = p
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.numpy_rng = numpy_rng
self.theano_rng = theano_rng
self.training_end_state = None
if W is None:
            # W is initialized with `initial_W`, drawn from a zero-mean
            # normal distribution with a small standard deviation (the
            # commented-out alternative is the classic uniform init on
            # -4*sqrt(6./(n_visible+n_hidden)) to
            # 4*sqrt(6./(n_hidden+n_visible))). The output is converted
            # using asarray to dtype theano.config.floatX so that the
            # code is runnable on GPU
initial_W = numpy.asarray(
numpy_rng.normal(
scale=0.01,
# numpy_rng.uniform(
# low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
# high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)
),
dtype=theano.config.floatX
)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(
value=numpy.zeros(
n_hidden,
dtype=theano.config.floatX
),
name='hbias',
borrow=True
)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(
value=numpy.zeros(
n_visible,
dtype=theano.config.floatX
),
name='vbias',
borrow=True
)
# initialize input layer for standalone RBM or layer0 of DBN
if not input:
self.input = tensor.matrix('input')
else:
self.input = input
self.W = W
self.Wt = W.T
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
        self.r_sample = None
        # symbolic momentum scalar, substituted per-minibatch through the
        # `givens` mapping in learn_model
        self.momentum = tensor.scalar('momentum', dtype=theano.config.floatX)
# Parameters to implement momentum
# See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines",
# UTML TR 2010-003, 2010. Section 9
self.reset_speed_params()
self.speed_params = [self.W_speed, self.hbias_speed, self.vbias_speed]
def reset_speed_params(self):
self.W_speed = theano.shared(
numpy.zeros((self.n_visible, self.n_hidden), dtype=theano.config.floatX),
name='W_speed',
borrow=True)
self.hbias_speed = theano.shared(numpy.zeros(self.n_hidden, dtype=theano.config.floatX),
name='hbias_speed',
borrow=True)
self.vbias_speed = theano.shared(numpy.zeros(self.n_visible, dtype=theano.config.floatX),
name='vbias_speed',
borrow=True)
def free_energy(self, v_sample):
''' Function to compute the free energy '''
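        # For a Bernoulli RBM with visible bias b and hidden bias c:
        #   F(v) = -b.v - sum_j softplus((v.W + c)_j)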
wx_b = tensor.dot(v_sample, self.W) + self.hbias
vbias_term = tensor.dot(v_sample, self.vbias)
hidden_term = tensor.sum(nnet.softplus(wx_b), axis=1)
return -hidden_term - vbias_term
def free_energy_gap(self, train, test):
""" Computes the free energy gap between train and test set, F(x_test) - F(x_train).
See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines", UTML TR 2010-003, 2010, section 6.
Originally from: https://github.com/wuaalb/keras_extensions/blob/master/keras_extensions/rbm.py
"""
return tensor.mean(self.free_energy(test)) - tensor.mean(self.free_energy(train))
    def free_energies(self, train, test):
        return self.free_energy(train), self.free_energy(test)
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = tensor.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visible units
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
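        # r_sample is a dropout mask over the hidden units: a random binary
        # mask during training (set in get_cost_updates) and all ones during
        # sampling, so dropped units contribute zero activation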
h1_mean = h1_mean * self.r_sample
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = tensor.dot(hid, self.Wt) + self.vbias
return [pre_sigmoid_activation, nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self,
lr,
momentum,
weightcost,
k=1,
batch_size=None,
persistent=None,
automated_grad=False
):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param k: number of Gibbs steps to do in CD-k/PCD-k
:param lambdas: parameters for tuning weigths updates in CD-k/PCD-k
of Bernoullian RBM
:param weightcost: L1 weight-decay (see Hinton 2010
"A Practical Guide to Training Restricted Boltzmann
Machines" section 10)
:param batch_size: size of the batch of samples used for training
:param persistent: None for CD. For PCD, shared variable
containing archived state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param symbolic_grad: True if Theano automated gradient is
used instead of CD. Default is False.
:return: Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
self.Wt = self.W.T
# Dropout
self.r_sample = self.theano_rng.binomial(size=(batch_size,self.n_hidden),
n=1, p=self.p,
dtype=theano.config.floatX)
# compute values for the positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
        # for CD, we use the newly generated hidden sample
# for PCD, we initialize from the archived state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
(
[
pre_sigmoid_nvs,
nv_means,
nv_samples,
pre_sigmoid_nhs,
nh_means,
nh_samples
],
updates
) = theano.scan(
self.gibbs_hvh,
# the None are place holders, saying that
# chain_start is the initial state corresponding to the
# 6th output
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k,
name="gibbs_hvh"
)
# determine gradients on RBM parameters
# note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
if automated_grad:
gradients = self.compute_symbolic_grad(chain_end)
else:
gradients = self.compute_rbm_grad(ph_mean, nh_means[-1], nv_means[-1],
batch_size, weightcost)
for gradient, param, speed_param in zip(
gradients, self.params, self.speed_params):
# make sure that the momentum is of the right dtype
# make sure that the learning rate is of the right dtype
updates[speed_param] = speed_param * \
tensor.cast(momentum, dtype=theano.config.floatX) + \
gradient * \
(1.0 - tensor.cast(momentum, dtype=theano.config.floatX))
updates[param] = param + speed_param * \
tensor.cast(lr, dtype=theano.config.floatX)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_samples[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(pre_sigmoid_nvs[-1])
return monitoring_cost, updates
def compute_symbolic_grad(self, chain_end):
"""
Compute the gradient of the log-likelihood with respect to the parameters
self.params symbolically.
:param chain_end: symbolic variable with the final sample of the Gibbs chain
:return: a list with the gradients for each of the parameters self.params
"""
cost = tensor.mean(self.free_energy(chain_end)) - \
tensor.mean(self.free_energy(self.input))
# We must not compute the gradient through the gibbs sampling
gradients = tensor.grad(cost, self.params, consider_constant=[chain_end])
return gradients
def compute_rbm_grad(self, ph_mean, nh_mean, nv_mean, batch_size, weightcost):
"""
Compute the gradient of the log-likelihood for an RBM with respect
to the parameters self.params using the expectations.
:param ph_mean: symbolic variable with p(h_i=1|v0) where v0 is a
training sample for all hidden nodes and for all samples
:param nh_mean: symbolic variable with p(h_i=1|vk) where vk is the
final sample of the Gibbs chain for all hidden nodes and
for all samples
:param nv_mean: symbolic variable with p(v_j=1|hk) where hk is the final
hidden layer of the Gibbs chain for all visible nodes and
for all samples
:param batch_size: number of samples of the training set
:param weightcost: scalar used as weight-cost for L1 weight-decay
(see Hinton, "A Practical Guide to Training Restricted
Boltzmann Machines" (2010))
:return: a list with the gradients for each parameter in self.params
"""
W_grad = (tensor.dot(self.input.T, ph_mean) -
tensor.dot(nv_mean.T, nh_mean))/ \
tensor.cast(batch_size, dtype=theano.config.floatX) - \
tensor.cast(weightcost, dtype=theano.config.floatX) * self.W
hbias_grad = tensor.mean(ph_mean - nh_mean, axis=0)
vbias_grad = tensor.mean(self.input - nv_mean, axis=0)
gradients = [W_grad, hbias_grad, vbias_grad]
return gradients
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = tensor.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = tensor.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
cost = - tensor.mean(self.n_visible * nnet.softplus(fe_xi - fe_xi_flip))
# increment bit_i_idx % number as part of updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
def get_reconstruction_cost(self, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
optimized for speed and stability. This is done by changing
several parts of the subgraphs with others. One such
optimization expresses terms of the form log(sigmoid(x)) in
        terms of softplus. We need this optimization for the
        cross-entropy since the sigmoid of numbers larger than 30. (or
        even less than that) turns to 1. and numbers smaller than
        -30. turn to 0, which in turn will force theano to compute
        log(0) and therefore we will get either -inf or NaN as
        cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
of log(sigmoid(..)) and will not apply the wanted
optimization. We can not go and replace the sigmoid in scan
        with something else either, because this only needs to be done
        on the last step. Therefore the easiest and most efficient way
        is to also get the pre-sigmoid activation as an output of
scan, and apply both the log and sigmoid outside scan such
that Theano can catch and optimize the expression.
"""
cross_entropy = nnet.binary_crossentropy(
nnet.sigmoid(pre_sigmoid_nv),self.input).sum(axis=1).mean()
return cross_entropy
def training(self, train_set_x, validation_set_x,
training_epochs, batch_size=10,
learning_rate=0.1, k=1,
initial_momentum = 0.0, final_momentum = 0.0,
weightcost = 0.0,
persistent = True,
display_fn=None, graph_output=False):
if persistent:
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, self.n_hidden),
dtype=theano.config.floatX),
borrow=True)
else:
persistent_chain = None
        # get the cost and the gradient corresponding to one step of CD-k
        cost, updates = self.get_cost_updates(lr=learning_rate,
                                              momentum=self.momentum,
                                              k=k,
                                              weightcost=weightcost,
                                              batch_size=batch_size,
                                              persistent=persistent_chain
                                              )
self.learn_model(train_set_x=train_set_x,
validation_set_x=validation_set_x,
training_epochs=training_epochs,
batch_size=batch_size,
initial_momentum=initial_momentum,
final_momentum=final_momentum,
cost=cost,
updates=updates,
display_fn=display_fn,
graph_output=graph_output)
def learn_model(self, train_set_x, validation_set_x,
training_epochs, batch_size,
initial_momentum, final_momentum,
cost, updates,
display_fn, graph_output):
# allocate symbolic variables for the data
indexes = tensor.vector('indexes', dtype='int32') # index to a [mini]batch
momentum = tensor.scalar('momentum', dtype=theano.config.floatX)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
        train_rbm = theano.function(
            [indexes, momentum],
            cost,
            updates=updates,
            givens={
                self.input: train_set_x[indexes],
                self.momentum: momentum
            },
            # the GRBM update rules do not use momentum; tolerate the
            # unused input so both classes can share this function
            on_unused_input='ignore',
            name='train_rbm'
            # TODO: NanGuardMode should be selected with a flag
            # ,mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
        )
        train_sample = tensor.matrix('train_sample', dtype=theano.config.floatX)
        validation_sample = tensor.matrix('validation_sample', dtype=theano.config.floatX)
feg = self.free_energy_gap(train_sample, validation_sample)
feg_rbm = theano.function(
[indexes],
outputs=feg,
givens={
train_sample: train_set_x[indexes],
validation_sample: validation_set_x
}
)
if graph_output:
v_sample = tensor.matrix('v_sample', dtype=theano.config.floatX)
h_out = self.sample_h_given_v(v_sample)
get_output = theano.function(
[v_sample],
outputs=h_out,
)
# compute number of minibatches for training, validation and testing
n_train_data = train_set_x.get_value(borrow=True).shape[0]
plotting_time = 0.
if graph_output:
fig = plt.figure(1)
plt.ion()
start_time = timeit.default_timer()
# go through training epochs
momentum = initial_momentum
for epoch in range(training_epochs):
if epoch == 5:
momentum = final_momentum
_, minibatches = get_minibatches_idx(n_train_data,
batch_size,
self.numpy_rng)
# go through the training set
mean_cost = []
for batch_indexes in minibatches:
mean_cost += [train_rbm(batch_indexes, momentum)]
feg = feg_rbm(range(validation_set_x.get_value(borrow=True).shape[0]))
print('Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost))
print('Free energy gap is ', feg)
# Plot filters after each training epoch
plotting_start = timeit.default_timer()
if display_fn is not None:
# Construct image from the weight matrix
Wimg = display_fn(self.W.get_value(borrow=True), self.n_hidden)
scipy.misc.imsave('filters_at_epoch_%i.png' % epoch, Wimg)
if graph_output:
validation_output = get_output(validation_set_x.get_value(borrow=True))
plt.clf()
plt.subplot(2, 1, 1)
plt.imshow(validation_output[1])
training_output = get_output(train_set_x.get_value(borrow=True))
plt.subplot(2, 1, 2)
plt.imshow(training_output[1][range(validation_set_x.get_value(borrow=True).shape[0])])
plt.draw()
plt.pause(0.05)
plotting_stop = timeit.default_timer()
plotting_time += (plotting_stop - plotting_start)
end_time = timeit.default_timer()
pretraining_time = (end_time - start_time) - plotting_time
print ('Training took %f minutes' % (pretraining_time / 60.))
if graph_output:
plt.close(fig)
def sampling(self, n_samples, persistent_vis_chain):
"""
Sampling from the RBM.
        :param n_samples: number of samples to draw
        :param persistent_vis_chain: shared variable holding the current
            visible state of the persistent Gibbs chain
        :return: list of "mean field" visible samples
"""
if self.r_sample is None:
self.r_sample = theano.shared(value=numpy.ones((persistent_vis_chain.get_value().shape[0],
self.n_hidden),
dtype=theano.config.floatX),
name='r_sample',
borrow=True)
plot_every = 5
# define one step of Gibbs sampling define a
# function that does `plot_every` steps before returning the
# sample for plotting
(
[
presig_hids,
hid_mfs,
hid_samples,
presig_vis,
vis_mfs,
vis_samples
],
updates
) = theano.scan(
self.gibbs_vhv,
outputs_info=[None, None, None, None, None, persistent_vis_chain],
n_steps=plot_every,
name="gibbs_vhv"
)
# add to updates the shared variable that takes care of our persistent
# chain :.
updates.update({persistent_vis_chain: vis_samples[-1]})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = theano.function(
[],
[
vis_mfs[-1],
vis_samples[-1]
],
updates=updates,
name='sample_fn'
)
samples = []
for idx in range(n_samples):
# generate `plot_every` intermediate samples that we discard,
# because successive samples in the chain are too correlated
print(' ... computing sample %d' % idx)
vis_mf, vis_sample = sample_fn()
samples.append(vis_mf)
return samples
def reverse_sampling(self, n_samples, persistent_hid_chain, gibbs_steps = 50):
"""
Sampling from the RBM.
        :param n_samples: number of samples to draw
        :param persistent_hid_chain: shared variable holding the current
            hidden state of the persistent Gibbs chain
        :param gibbs_steps: number of Gibbs steps between returned samples
        :return: list of sampled visible states
"""
if self.r_sample is None:
self.r_sample = theano.shared(value=numpy.ones((persistent_hid_chain.get_value().shape[0],
self.n_hidden),
dtype=theano.config.floatX),
name='r_sample',
borrow=True)
# define one step of Gibbs sampling define a
# function that does `plot_every` steps before returning the
# sample for plotting
(
[
presig_vis,
vis_mfs,
vis_samples,
presig_hids,
hid_mfs,
hid_samples
],
updates
) = theano.scan(
self.gibbs_hvh,
outputs_info=[None, None, None, None, None, persistent_hid_chain],
n_steps=gibbs_steps,
name="gibbs_hvh"
)
# add to updates the shared variable that takes care of our persistent
# chain :.
updates.update({persistent_hid_chain: hid_samples[-1]})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = theano.function(
[],
[
vis_mfs[-1],
vis_samples[-1]
],
updates=updates,
name='sample_fn'
)
samples = []
for idx in range(n_samples):
# generate `plot_every` intermediate samples that we discard,
# because successive samples in the chain are too correlated
# print(' ... computing sample %d' % idx)
vis_mf, vis_sample = sample_fn()
samples.append(vis_sample)
return samples
class GRBM(RBM):
    """Gaussian-Bernoulli Restricted Boltzmann Machine (GRBM)."""
def __init__(self,
name="",
input=None,
n_visible=784,
n_hidden=500,
W=None,
hbias=None,
vbias=None,
p=1.0,
numpy_rng=None,
theano_rng=None,
error_free=True):
super(GRBM, self).__init__(name, input, n_visible, n_hidden,
W, hbias, vbias, p, numpy_rng, theano_rng)
self.error_free = error_free
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
v1_mean = tensor.dot(h0_sample, self.Wt) + self.vbias
if self.error_free:
v1_sample = v1_mean
else:
# get a sample of the visible given their activation
v1_sample = v1_mean + self.theano_rng.normal(size=v1_mean.shape,
avg=0, std=1.0,
dtype=theano.config.floatX)
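        # Gaussian units have no pre-sigmoid activation; the mean is
        # returned twice to keep the [pre_activation, mean, sample]
        # interface of the Bernoulli RBM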
return [v1_mean, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state.
        For Gaussian-Bernoulli RBMs we use a mean field approximation
of the intermediate visible state.
'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_mean)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state.
        For Gaussian-Bernoulli RBMs we use a mean field approximation
of the intermediate hidden state.
'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_mean)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def free_energy(self, v_sample):
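        # Gaussian visible units with unit variance, visible bias b, hidden
        # bias c:  F(v) = 0.5*||v - b||^2 - sum_j softplus((v.W + c)_j)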
wx_b = tensor.dot(v_sample, self.W) + self.hbias
vbias_term = 0.5*tensor.sqr(v_sample - self.vbias).sum(axis=1)
hidden_term = nnet.softplus(wx_b).sum(axis=1)
return -hidden_term + vbias_term
def get_cost_updates(self,
lr,
weightcost,
k=1,
lambdas= [0.0, 0.0],
batch_size=None,
persistent=None,
automated_grad=False
):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param k: number of Gibbs steps to do in CD-k/PCD-k
        :param lambdas: parameters for tuning weight updates in CD-k/PCD-k
            of a Bernoulli RBM
:param weightcost: L1 weight-decay (see Hinton 2010
"A Practical Guide to Training Restricted Boltzmann
Machines" section 10)
:param batch_size: size of the batch of samples used for training
:param persistent: None for CD. For PCD, shared variable
containing archived state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
        :param automated_grad: True if Theano automated gradient is
            used instead of CD. Default is False.
:return: Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
self.Wt = self.W.T
# Dropout
self.r_sample = self.theano_rng.binomial(size=(batch_size,self.n_hidden),
n=1, p=self.p,
dtype=theano.config.floatX)
# compute values for the positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
        # for CD, we use the newly generated hidden mean
# for PCD, we initialize from the archived state of the chain
if persistent is None:
chain_start = ph_mean
else:
chain_start = persistent
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
(
[
pre_sigmoid_nvs,
nv_means,
nv_samples,
pre_sigmoid_nhs,
nh_means,
nh_samples
],
updates
) = theano.scan(
self.gibbs_hvh,
# the None are place holders, saying that
# chain_start is the initial state corresponding to the
# 6th output
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k,
name="gibbs_hvh"
)
# determine gradients on RBM parameters
# note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
if automated_grad:
gradients = self.compute_symbolic_grad(chain_end)
else:
gradients = self.compute_rbm_grad(ph_mean, nh_means[-1], nv_means[-1],
batch_size, weightcost)
epsilon = 0.00001
# ISSUE: it returns Inf when Wij is small
gradients[0] = gradients[0] / tensor.cast(1.0 + 2.0 * lr * lambdas[0] / (tensor.abs_(self.W)+epsilon),
dtype=theano.config.floatX)
# constructs the update dictionary
multipliers = [
# Issue: it returns Inf when Wij is small, therefore a small constant is added
(1.0 - 2.0 * lr * lambdas[1]) / (1.0 + 2.0 * lr * lambdas[0] / (tensor.abs_(self.W) + epsilon)),
1.0,
1.0]
for gradient, param, multiplier in zip(gradients, self.params, multipliers):
# make sure that the learning rate is of the right dtype
updates[param] = param * tensor.cast(multiplier, dtype=theano.config.floatX) + \
gradient * tensor.cast(lr, dtype=theano.config.floatX)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_means[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(pre_sigmoid_nvs[-1])
return monitoring_cost, updates
def get_reconstruction_cost(self, pre_sigmoid_nv):
""" Compute mean squared error between reconstructed data and input data.
Mean over the samples and features.
"""
error = tensor.sqr(nnet.sigmoid(pre_sigmoid_nv) - self.input).mean()
return error
def training(self, train_set_x, validation_set_x,
training_epochs, batch_size=10,
learning_rate=0.01, k=1,
initial_momentum = 0.0, final_momentum = 0.0,
weightcost = 0.0,
lambdas = [0.0, 0.1],
persistent = False,
display_fn=None, graph_output=False):
cost, updates = self.get_cost_updates(lr=learning_rate,
k=k,
lambdas=lambdas,
weightcost=weightcost,
batch_size=batch_size
)
self.learn_model(train_set_x=train_set_x,
validation_set_x=validation_set_x,
training_epochs=training_epochs,
batch_size=batch_size,
initial_momentum=initial_momentum,
final_momentum=final_momentum,
cost=cost,
updates=updates,
display_fn=display_fn,
graph_output=graph_output)
def test(class_to_test=RBM,
learning_rate=0.1,
training_epochs=15,
batch_size=20,
n_chains=20,
n_samples=10,
output_folder='rbm_plots',
n_hidden=500):
"""
    Demonstrate how to train an RBM and afterwards sample from it using
    Theano. This is demonstrated on MNIST.

    :param class_to_test: the RBM class to train (RBM or GRBM)
    :param learning_rate: learning rate used for training the RBM
    :param training_epochs: number of epochs used for training
    :param batch_size: size of a batch used to train the RBM
    :param n_chains: number of parallel Gibbs chains to be used for sampling
    :param n_samples: number of samples to plot for each chain
    :param output_folder: directory in which plots and samples are written
    :param n_hidden: number of hidden units
    """
# Load the data
mnist = MNIST()
raw_dataset = mnist.images
n_data = raw_dataset.shape[0]
if class_to_test == GRBM:
dataset = mnist.normalize(raw_dataset)
# Gaussian RBM needs a lower learning rate. See Hinton'10
learning_rate = learning_rate / 10
else:
dataset = raw_dataset/255
validation_set_size = 60
train_set_x = theano.shared(dataset[0:int(n_data*5/6)-validation_set_size], borrow=True)
validation_set_x = theano.shared(dataset[int(n_data*5/6)-validation_set_size:int(n_data*5/6)])
test_set_x = theano.shared(dataset[int(n_data*5/6):n_data], borrow=True)
# find out the number of test samples
number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]
print('Number of test samples %d' % number_of_test_samples)
x = tensor.matrix('x') # the data is presented as rasterized images
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
#################################
# Training the RBM #
#################################
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
root_dir = os.getcwd()
os.chdir(output_folder)
# construct the RBM class
rbm = class_to_test(input=x, n_visible=mnist.sizeX * mnist.sizeY,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
rbm.training(train_set_x=train_set_x,
validation_set_x=validation_set_x,
training_epochs=training_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
initial_momentum=0.6, final_momentum=0.9,
weightcost=0.0002,
display_fn=mnist.display_weigths,
graph_output=True)
# pick random test examples, with which to initialize the persistent chain
test_idx = rng.randint(number_of_test_samples - n_chains)
persistent_vis_chain = theano.shared(
numpy.asarray(
test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
dtype=theano.config.floatX
)
)
samples = rbm.sampling(n_samples, persistent_vis_chain)
# construct image
Y = mnist.display_samples(samples)
scipy.misc.imsave('samples.png', Y)
os.chdir(root_dir)
if __name__ == '__main__':
test(class_to_test=RBM, training_epochs=8)
| glgerard/cloud-mdbn | src/rbm.py | Python | apache-2.0 | 43,150 | ["Gaussian"] | 44fd1a81ddf7d3963a738fba5ea0aa42389b7eb66408ed89af3e4f875f20de3f |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djblets.auth.signals import user_registered
from djblets.cache.backend import cache_memoize
from djblets.db.fields import CounterField, JSONField
from djblets.forms.fields import TIMEZONE_CHOICES
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.managers import (ProfileManager,
ReviewRequestVisitManager,
TrophyManager)
from reviewboard.accounts.trophies import trophies_registry
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.avatars import avatar_services
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import (reply_published,
review_published,
review_request_published)
from reviewboard.site.models import LocalSite
from reviewboard.site.signals import local_site_user_added
@python_2_unicode_compatible
class ReviewRequestVisit(models.Model):
"""
A recording of the last time a review request was visited by a user.
Users have one ReviewRequestVisit entry in the database per review
request they've visited. This is used to keep track of any updates
to review requests they've already seen, so that we can intelligently
inform them that new discussions have taken place.
"""
VISIBLE = 'V'
ARCHIVED = 'A'
MUTED = 'M'
VISIBILITY = (
(VISIBLE, 'Visible'),
(ARCHIVED, 'Archived'),
(MUTED, 'Muted'),
)
user = models.ForeignKey(User, related_name='review_request_visits')
review_request = models.ForeignKey(ReviewRequest, related_name='visits')
timestamp = models.DateTimeField(_('last visited'), default=timezone.now)
visibility = models.CharField(max_length=1, choices=VISIBILITY,
default=VISIBLE)
# Set this up with a ReviewRequestVisitManager, which inherits from
# ConcurrencyManager to help prevent race conditions.
objects = ReviewRequestVisitManager()
def __str__(self):
"""Return a string used for the admin site listing."""
return 'Review request visit'
class Meta:
db_table = 'accounts_reviewrequestvisit'
unique_together = ('user', 'review_request')
index_together = [('user', 'visibility')]
verbose_name = _('Review Request Visit')
verbose_name_plural = _('Review Request Visits')
@python_2_unicode_compatible
class Profile(models.Model):
"""User profile which contains some basic configurable settings."""
user = models.ForeignKey(User, unique=True)
# This will redirect new users to the account settings page the first time
# they log in (or immediately after creating an account). This allows
# people to fix their real name and join groups.
first_time_setup_done = models.BooleanField(
default=False,
verbose_name=_("first time setup done"),
help_text=_("Indicates whether the user has already gone through "
"the first time setup process by saving their user "
"preferences."))
# Whether the user wants to receive emails
should_send_email = models.BooleanField(
default=True,
verbose_name=_("send email"),
help_text=_("Indicates whether the user wishes to receive emails."))
should_send_own_updates = models.BooleanField(
default=True,
verbose_name=_("receive emails about own actions"),
help_text=_("Indicates whether the user wishes to receive emails "
"about their own activity."))
collapsed_diffs = models.BooleanField(
default=True,
verbose_name=_("collapsed diffs"),
help_text=_("Indicates whether diffs should be shown in their "
"collapsed state by default."))
wordwrapped_diffs = models.BooleanField(
default=True,
help_text=_("This field is unused and will be removed in a future "
"version."))
syntax_highlighting = models.BooleanField(
default=True,
verbose_name=_("syntax highlighting"),
help_text=_("Indicates whether the user wishes to see "
"syntax highlighting in the diffs."))
is_private = models.BooleanField(
default=False,
verbose_name=_("profile private"),
help_text=_("Indicates whether the user wishes to keep his/her "
"profile private."))
open_an_issue = models.BooleanField(
default=True,
verbose_name=_("opens an issue"),
help_text=_("Indicates whether the user wishes to default "
"to opening an issue or not."))
default_use_rich_text = models.NullBooleanField(
default=None,
verbose_name=_('enable Markdown by default'),
help_text=_('Indicates whether new posts or comments should default '
'to being in Markdown format.'))
# Indicate whether closed review requests should appear in the
# review request lists (excluding the dashboard).
show_closed = models.BooleanField(default=True)
sort_review_request_columns = models.CharField(max_length=256, blank=True)
sort_dashboard_columns = models.CharField(max_length=256, blank=True)
sort_submitter_columns = models.CharField(max_length=256, blank=True)
sort_group_columns = models.CharField(max_length=256, blank=True)
review_request_columns = models.CharField(max_length=256, blank=True)
dashboard_columns = models.CharField(max_length=256, blank=True)
submitter_columns = models.CharField(max_length=256, blank=True)
group_columns = models.CharField(max_length=256, blank=True)
# A list of starred review requests. This allows users to monitor a
# review request and receive e-mails on updates without actually being
# on the reviewer list or commenting on the review. This is similar to
# adding yourself to a CC list.
starred_review_requests = models.ManyToManyField(ReviewRequest, blank=True,
related_name="starred_by")
# A list of watched groups. This is so that users can monitor groups
# without actually joining them, preventing e-mails being sent to the
# user and review requests from entering the Incoming Reviews list.
starred_groups = models.ManyToManyField(Group, blank=True,
related_name="starred_by")
# Allows per-user timezone settings
timezone = models.CharField(choices=TIMEZONE_CHOICES, default='UTC',
max_length=30)
settings = JSONField(null=True, default=dict)
extra_data = JSONField(null=True, default=dict)
objects = ProfileManager()
@property
def should_use_rich_text(self):
"""Get whether rich text should be used by default for this user.
If the user has chosen whether or not to use rich text explicitly,
then that choice will be respected. Otherwise, the system default is
used.
"""
if self.default_use_rich_text is None:
siteconfig = SiteConfiguration.objects.get_current()
return siteconfig.get('default_use_rich_text')
else:
return self.default_use_rich_text
@property
def should_enable_desktop_notifications(self):
"""Return whether desktop notifications should be used for this user.
If the user has chosen whether or not to use desktop notifications
explicitly, then that choice will be respected. Otherwise, we
enable desktop notifications by default.
Returns:
bool:
            If the user has set whether they wish to receive desktop
notifications, then use their preference. Otherwise, we return
``True``.
"""
return (not self.settings or
self.settings.get('enable_desktop_notifications', True))
def star_review_request(self, review_request):
"""Mark a review request as starred.
This will mark a review request as starred for this user and
immediately save to the database.
"""
self.starred_review_requests.add(review_request)
if (review_request.public and
review_request.status in (ReviewRequest.PENDING_REVIEW,
ReviewRequest.SUBMITTED)):
site_profile = \
self.user.get_site_profile(review_request.local_site)
site_profile.increment_starred_public_request_count()
def unstar_review_request(self, review_request):
"""Mark a review request as unstarred.
This will mark a review request as unstarred for this user and
immediately save to the database.
"""
self.starred_review_requests.remove(review_request)
if (review_request.public and
review_request.status in (ReviewRequest.PENDING_REVIEW,
ReviewRequest.SUBMITTED)):
site_profile = \
self.user.get_site_profile(review_request.local_site)
site_profile.decrement_starred_public_request_count()
def star_review_group(self, review_group):
"""Mark a review group as starred.
This will mark a review group as starred for this user and
immediately save to the database.
"""
self.starred_groups.add(review_group)
def unstar_review_group(self, review_group):
"""Mark a review group as unstarred.
This will mark a review group as unstarred for this user and
immediately save to the database.
"""
self.starred_groups.remove(review_group)
def __str__(self):
"""Return a string used for the admin site listing."""
return self.user.username
@property
def avatar_service(self):
"""The avatar service the user has selected.
Returns:
djblets.avatars.services.base.AvatarService:
The avatar service.
"""
service_id = self.settings.get('avatars', {}).get('avatar_service_id')
return avatar_services.get_or_default(service_id)
@avatar_service.setter
def avatar_service(self, service):
"""Set the avatar service.
Args:
service (djblets.avatars.services.base.AvatarService):
The avatar service.
"""
self.settings.setdefault('avatars', {})['avatar_service_id'] = \
service.avatar_service_id
def get_display_name(self, viewing_user):
"""Return the name to display to the given user.
If any of the following is True and the user this profile belongs to
has a full name set, the display name will be the user's full name:
* The viewing user is authenticated and this profile is public.
* The viewing user is the user this profile belongs to.
* The viewing user is an administrator.
* The viewing user is a LocalSite administrator on any LocalSite for
which the user this profile belongs to is a member.
Otherwise the display name will be the user's username.
Args:
viewing_user (django.contrib.auth.models.User):
The user who is viewing the profile.
Returns:
unicode:
The name to display.
"""
if (viewing_user is not None and
viewing_user.is_authenticated() and
(not self.is_private or
viewing_user.pk == self.user_id or
viewing_user.is_admin_for_user(self.user))):
return self.user.get_full_name() or self.user.username
else:
return self.user.username
def save(self, *args, **kwargs):
"""Save the profile to the database.
The profile will only be saved if the user is not affected by read-only
mode.
Args:
*args (tuple):
Positional arguments to pass through to the superclass.
**kwargs (dict):
Keyword arguments to pass through to the superclass.
"""
if not is_site_read_only_for(self.user):
super(Profile, self).save(*args, **kwargs)
class Meta:
db_table = 'accounts_profile'
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
@python_2_unicode_compatible
class LocalSiteProfile(models.Model):
"""User profile information specific to a LocalSite."""
user = models.ForeignKey(User, related_name='site_profiles')
profile = models.ForeignKey(Profile, related_name='site_profiles')
local_site = models.ForeignKey(LocalSite, null=True, blank=True,
related_name='site_profiles')
# A dictionary of permission that the user has granted. Any permission
# missing is considered to be False.
permissions = JSONField(null=True)
# Counts for quickly knowing how many review requests are incoming
# (both directly and total), outgoing (pending and total ever made),
# and starred (public).
direct_incoming_request_count = CounterField(
_('direct incoming review request count'),
initializer=lambda p: (
ReviewRequest.objects.to_user_directly(
p.user, local_site=p.local_site).count()
if p.user_id else 0))
total_incoming_request_count = CounterField(
_('total incoming review request count'),
initializer=lambda p: (
ReviewRequest.objects.to_user(
p.user, local_site=p.local_site).count()
if p.user_id else 0))
pending_outgoing_request_count = CounterField(
_('pending outgoing review request count'),
initializer=lambda p: (
ReviewRequest.objects.from_user(
p.user, p.user, local_site=p.local_site).count()
if p.user_id else 0))
total_outgoing_request_count = CounterField(
_('total outgoing review request count'),
initializer=lambda p: (
ReviewRequest.objects.from_user(
p.user, p.user, None, local_site=p.local_site).count()
if p.user_id else 0))
starred_public_request_count = CounterField(
_('starred public review request count'),
initializer=lambda p: (
p.profile.starred_review_requests.public(
user=None, local_site=p.local_site).count()
if p.pk else 0))
def __str__(self):
"""Return a string used for the admin site listing."""
return '%s (%s)' % (self.user.username, self.local_site)
class Meta:
db_table = 'accounts_localsiteprofile'
unique_together = (('user', 'local_site'),
('profile', 'local_site'))
verbose_name = _('Local Site Profile')
verbose_name_plural = _('Local Site Profiles')
class Trophy(models.Model):
"""A trophy represents an achievement given to the user.
It is associated with a ReviewRequest and a User and can be associated
with a LocalSite.
"""
category = models.CharField(max_length=100)
received_date = models.DateTimeField(default=timezone.now)
review_request = models.ForeignKey(ReviewRequest, related_name="trophies")
local_site = models.ForeignKey(LocalSite, null=True,
related_name="trophies")
user = models.ForeignKey(User, related_name="trophies")
objects = TrophyManager()
@cached_property
def trophy_type(self):
"""The TrophyType instance for this trophy."""
return trophies_registry.get_for_category(self.category)
def get_display_text(self):
"""Get the display text for this trophy."""
return self.trophy_type.get_display_text(self)
class Meta:
db_table = 'accounts_trophy'
verbose_name = _('Trophy')
verbose_name_plural = _('Trophies')
#
# The following functions are patched onto the User model.
#
def _is_user_profile_visible(self, user=None):
"""Return whether or not the given user can view this user's profile.
Profiles are hidden from unauthenticated users. For authenticated users, a
profile is visible if one of the following is true:
* The profile is not marked as private.
* The viewing user owns the profile.
* The viewing user is a staff member.
* The viewing user is an administrator on a Local Site of which the viewed
user is a member.
Args:
user (django.contrib.auth.models.User, optional):
The user for whom visibility of the profile is to be determined.
Returns:
bool:
Whether or not the given user can view the profile.
"""
if user is None or user.is_anonymous():
return False
if hasattr(self, 'is_private'):
# This is an optimization used by the web API. It will set
# is_private on this User instance through a query, saving a
# lookup for each instance.
#
# This must be done because select_related() and
# prefetch_related() won't cache reverse foreign key relations.
is_private = self.is_private
else:
is_private = self.get_profile().is_private
return (not is_private or
user == self or
user.is_admin_for_user(self))
def _should_send_email(self):
"""Get whether a user wants to receive emails.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_email
def _should_send_own_updates(self):
"""Get whether a user wants to receive emails about their activity.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_own_updates
def _get_profile(self, cached_only=False, create_if_missing=True,
return_is_new=False):
"""Return the profile for the User.
The profile will be cached, preventing queries for future lookups.
If a profile doesn't exist in the database, and a cached-only copy
isn't being returned, then a profile will be created in the database.
Version Changed:
3.0.12:
Added support for ``create_if_missing`` and ``return_is_new``
arguments.
Args:
cached_only (bool, optional):
Whether we should only return the profile cached for the user.
If True, this function will not retrieve an uncached profile or
create one that doesn't exist. Instead, it will return ``None``.
create_if_missing (bool, optional):
Whether to create a site profile if one doesn't already exist.
return_is_new (bool, optional):
If ``True``, the result of the call will be a tuple containing
the profile and a boolean indicating if the profile was
newly-created.
Returns:
Profile or tuple:
The user's profile.
If ``return_is_new`` is ``True``, then this will instead return
``(Profile, is_new)``.
Raises:
Profile.DoesNotExist:
The profile did not exist. This can only be raised if passing
``create_if_missing=False``.
"""
# Note that we use the same cache variable that a select_related() call
# would use, ensuring that we benefit from Django's caching when possible.
profile = getattr(self, '_profile_set_cache', None)
is_new = False
if profile is None and not cached_only:
if create_if_missing:
profile, is_new = Profile.objects.get_or_create(user=self)
else:
# This may raise Profile.DoesNotExist.
profile = Profile.objects.get(user=self)
profile.user = self
self._profile_set_cache = profile
# While modern versions of Review Board set this to an empty dictionary,
# old versions would initialize this to None. Since we don't want to litter
# our code with extra None checks everywhere we use it, normalize it here.
if profile is not None and profile.extra_data is None:
profile.extra_data = {}
if return_is_new:
return profile, is_new
return profile
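# A minimal usage sketch of the patched-on accessor (assuming a saved
# ``User`` instance named ``user``; the variable names are illustrative):
#
#     profile = user.get_profile()                      # created if missing
#     profile, is_new = user.get_profile(return_is_new=True)
#     cached = user.get_profile(cached_only=True)       # may be None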
def _get_site_profile(self, local_site, cached_only=False,
create_if_missing=True, return_is_new=False):
"""Return the LocalSiteProfile for a given LocalSite for the User.
The site profile will be cached, preventing queries for future lookups.
If a site profile doesn't exist in the database, and a cached-only copy
isn't being returned, then a profile will be created in the database,
unless passing ``create_if_missing=False``.
Version Changed:
3.0.12:
* In previous versions, this would not create a site profile if one
didn't already exist. Now it does, unless passing
``create_if_missing=False``. This change was made to standardize
behavior between this and :py:meth:`User.get_profile`.
* Added support for ``cached_only``, ``create_if_missing`` and
``return_is_new`` arguments.
Args:
local_site (reviewboard.site.models.LocalSite):
The LocalSite to return a profile for. This is allowed to be
``None``, which means the profile applies to their global site
account.
cached_only (bool, optional):
Whether we should only return the profile cached for the user.
If True, this function will not retrieve an uncached profile or
create one that doesn't exist. Instead, it will return ``None``.
create_if_missing (bool, optional):
Whether to create a site profile if one doesn't already exist.
return_is_new (bool, optional):
If ``True``, the result of the call will be a tuple containing
the profile and a boolean indicating if the profile was
newly-created.
Returns:
LocalSiteProfile or tuple:
The user's LocalSite profile.
If ``return_is_new`` is ``True``, then this will instead return
``(LocalSiteProfile, is_new)``.
Raises:
LocalSiteProfile.DoesNotExist:
The profile did not exist. This can only be raised if passing
``create_if_missing=False``.
"""
if not hasattr(self, '_site_profiles'):
self._site_profiles = {}
if local_site is None:
local_site_id = None
else:
local_site_id = local_site.pk
is_new = False
site_profile = self._site_profiles.get(local_site_id)
if site_profile is None and not cached_only:
profile = self.get_profile()
if create_if_missing:
site_profile, is_new = LocalSiteProfile.objects.get_or_create(
user=self,
profile=profile,
local_site=local_site)
else:
# This may raise LocalSiteProfile.DoesNotExist.
site_profile = LocalSiteProfile.objects.get(
user=self,
profile=profile,
local_site=local_site)
# Set these directly in order to avoid further lookups.
site_profile.user = self
site_profile.profile = profile
site_profile.local_site = local_site
self._site_profiles[local_site_id] = site_profile
if return_is_new:
return site_profile, is_new
return site_profile
def _is_admin_for_user(self, user):
"""Return whether or not this user is an administrator for the given user.
Results will be cached for this user so that at most one query is done.
Args:
user (django.contrib.auth.models.User):
The user to check.
Returns:
bool:
Whether or not this user is an administrator for the given user.
"""
if self.is_staff:
return True
if not user or user.is_anonymous():
return False
if not hasattr(self, '_cached_admin_for_users'):
self._cached_admin_for_users = cache_memoize(
'%s-admin-for-users' % self.pk,
lambda: tuple(
User.objects
.filter(local_site__admins=self)
.values_list('pk', flat=True)
))
return user.pk in self._cached_admin_for_users
User.is_profile_visible = _is_user_profile_visible
User.get_profile = _get_profile
User.get_site_profile = _get_site_profile
User.should_send_email = _should_send_email
User.should_send_own_updates = _should_send_own_updates
User.is_admin_for_user = _is_admin_for_user
User._meta.ordering = ('username',)
@receiver(review_request_published)
def _call_compute_trophies(sender, review_request, **kwargs):
if review_request.public and not review_request.changedescs.exists():
Trophy.objects.compute_trophies(review_request)
@receiver(review_request_published)
def _call_unarchive_all_for_review_request(sender, review_request, **kwargs):
ReviewRequestVisit.objects.unarchive_all(review_request)
@receiver(review_published)
def _call_unarchive_all_for_review(sender, review, **kwargs):
ReviewRequestVisit.objects.unarchive_all(review.review_request_id)
@receiver(reply_published)
def _call_unarchive_all_for_reply(sender, reply, **kwargs):
ReviewRequestVisit.objects.unarchive_all(reply.review_request_id)
@receiver(user_registered)
@receiver(local_site_user_added)
def _add_default_groups(sender, user, local_site=None, **kwargs):
"""Add user to default groups.
When a user is registered, add the user to global default groups.
When a user is added to a LocalSite, add the user to default groups of the
LocalSite.
"""
if local_site:
default_groups = local_site.groups.filter(is_default_group=True)
else:
default_groups = Group.objects.filter(is_default_group=True,
local_site=None)
for default_group in default_groups:
default_group.users.add(user)
@receiver(m2m_changed, sender=Group.users.through)
def _on_group_user_membership_changed(instance, action, pk_set, reverse,
**kwargs):
"""Handler for when a review group's membership has changed.
When a user is added to or removed from a review group, their
:py:attr:`~LocalSiteProfile.total_incoming_request_count` counter will
be cleared, forcing it to be recomputed on next access. This ensures that
their incoming count will be correct when group memberships change.
Args:
instance (django.db.models.Model):
The instance that was updated. If ``reverse`` is ``True``, then
this will be a :py:class:`~django.contrib.auth.models.User`.
Otherwise, it will be ignored.
action (unicode):
The membership change action. The incoming count is only cleared
if this is ``post_add``, ``post_remove``, or ``pre_clear``.
pk_set (set of int):
The user IDs added to the group. If ``reverse`` is ``True``,
then this is ignored in favor of ``instance``.
reverse (bool):
Whether this signal is emitted through the forward
relation (``False`` -- :py:attr:`Group.users
<reviewboard.reviews.models.group.Group.users>`) or the reverse
relation (``True`` -- ``User.review_groups``).
**kwargs (dict):
Additional keyword arguments passed to the signal.
"""
if action in ('post_add', 'post_remove', 'pre_clear'):
q = None
if reverse:
if instance is not None:
q = Q(user=instance)
else:
if pk_set:
q = Q(user__in=pk_set)
if q is not None:
LocalSiteProfile.objects.filter(q).update(
total_incoming_request_count=None)
|
chipx86/reviewboard
|
reviewboard/accounts/models.py
|
Python
|
mit
| 28,471
|
[
"VisIt"
] |
5b5774049b60f103f8102960f7596a04c639063c5ee3866a2505efb60f4fa419
|
# This example illustrates how to measure the I-F curve of a neuron.
# The program creates a small group of neurons and injects a noisy current
# I(t)= I_mean + I_std*W(t)
# where W(t) is a white noise process.
# The program systematically drives the current through a series of values in the
# two-dimensional I_mean/I_std space and measures the firing rate of the neurons.
#
# In this example, we measure the I-F curve of the adaptive exponential
# integrate-and-fire neuron (aeif_cond_exp), but any other neuron model that
# accepts current inputs is possible.
# The model and its parameters are supplied when the IF_curve object is created.
import numpy
import cynest as nest
import shelve
model='aeif_cond_exp'
params={'a': 4.0,
'b': 80.8,
'V_th': -50.4,
'Delta_T': 2.0,
'I_e': 0.0,
'C_m': 281.0,
'g_L': 30.0,
'V_reset': -70.6,
'tau_w': 144.0,
't_ref': 5.0,
'V_peak': -40.0,
'E_L': -70.6,
'E_ex': 0.,
'E_in': -70.}
class IF_curve():
"""
This example illustrates how to measure the I-F curve of a neuron.
The program creates a small group of neurons and injects a noisy current
I(t)= I_mean + I_std*W(t)
where W(t) is a white noise process.
The program systematically drives the current through a series of values in the
two-dimensional I_mean/I_std space and measures the firing rate of the neurons.
"""
t_inter_trial=200. # Interval between two successive measurement trials
t_sim=1000. # Duration of a measurement trial.
n_neurons=100
n_threads=4
def __init__(self, model, params=False):
self.model=model
self.params=params
self.build()
self.connect()
def build(self):
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': self.n_threads})
if self.params:
nest.SetDefaults(self.model,self.params)
self.neuron=nest.Create(self.model,self.n_neurons)
self.noise=nest.Create('noise_generator')
self.spike=nest.Create('spike_detector')
nest.SetStatus(self.spike,[{'to_memory':True, 'to_file':False}])
def connect(self):
nest.DivergentConnect(self.noise,self.neuron)
nest.ConvergentConnect(self.neuron, self.spike)
def output_rate(self,mean, std):
t=nest.GetKernelStatus('time')
if t>100000: # prevent overflow of clock
self.build()
self.connect()
t=0.0
nest.Simulate(self.t_inter_trial)
nest.SetStatus(self.spike, "n_events", 0)
nest.SetStatus(self.noise,[{'mean':mean, 'std':std, 'start': 0.0, 'stop': 1000., 'origin':t}])
nest.Simulate(self.t_sim)
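# Convert the pooled spike count into a mean per-neuron firing rate in Hz
# (t_sim is in ms, hence the factor of 1000).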
rate=nest.GetStatus(self.spike, "n_events")[0]*1000.0/(self.n_neurons*self.t_sim)
return rate
def compute_transfer(self, i_mean=(0.0,100.0, 10.0), i_std=(0.0,100.0, 10.0)):
self.i_range=numpy.arange(*i_mean)
self.std_range=numpy.arange(*i_std)
self.rate=numpy.zeros((self.i_range.size,self.std_range.size))
nest.sr('M_WARNING setverbosity')
for n,i in enumerate(self.i_range):
print "I= %s"%i
for m,std in enumerate(self.std_range):
self.rate[n,m]=self.output_rate(i,std)
transfer=IF_curve(model, params)
transfer.compute_transfer()
dat=shelve.open(model+'_transfer.dat')
dat['I_mean']=transfer.i_range
dat['I_std'] =transfer.std_range
dat['rate'] =transfer.rate
dat.close()
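# A hypothetical follow-up (not part of the original script): the shelve
# file can later be reopened to retrieve the measured transfer function:
#
#     dat = shelve.open(model + '_transfer.dat')
#     rate = dat['rate']    # indexed as rate[i_mean_index, i_std_index]
#     dat.close()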
|
gewaltig/cython-neuron
|
cynest/examples/if_curve.py
|
Python
|
gpl-2.0
| 3,547
|
[
"NEURON"
] |
958bf6ee457e653b7a07f143363a1105e8d8748d4fd71b390fe555a2f539e3a2
|
"""
Copyright 2020 Kat Holt
Copyright 2020 Ryan Wick (rrwick@gmail.com)
https://github.com/katholt/Kleborate/
This file is part of Kleborate. Kleborate is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Kleborate is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Kleborate. If
not, see <http://www.gnu.org/licenses/>.
"""
from Bio.Seq import Seq
def truncation_check(hit, cov_threshold=90.0):
"""
Checks to see if the gene is truncated at the amino acid level.
"""
nucl_seq, ref_start, _ = hit.get_seq_start_end_pos_strand()
# The hit must start at the first base of the gene. If not, the gene is considered 0%.
if ref_start != 1:
return '-0%', 0.0, ''
# If there are any ambiguous bases in the sequence, then they will break translation, probably
# resulting in a truncation call.
ambiguous_bases = set(b for b in nucl_seq) - {'A', 'C', 'G', 'T'}
for b in ambiguous_bases:
nucl_seq = nucl_seq.split(b)[0]
# BioPython doesn't like it if the sequence isn't a multiple of 3.
nucl_seq = nucl_seq[:len(nucl_seq) // 3 * 3]
# The assumption is that the reference allele is a full CDS with a stop codon at the end. This
# isn't always true (the reference sequence is sometimes broken) but will serve to make our
# denominator for coverage.
ref_aa_length = (hit.ref_length - 3) // 3
coding_dna = Seq(nucl_seq)
translation = str(coding_dna.translate(table='Bacterial', to_stop=True))
coverage = 100.0 * len(translation) / ref_aa_length
if coverage >= cov_threshold:
return '', coverage, translation
else:
return '-{:.0f}%'.format(coverage), coverage, translation
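# Illustrative examples of the return value (hypothetical, not from the
# original source): a hit starting at reference base 1 whose translation
# covers >= 90% of the reference protein returns ('', coverage, translation);
# one covering only 45% returns ('-45%', 45.0, translation); a hit that does
# not start at base 1 returns ('-0%', 0.0, '').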
|
katholt/Kleborate
|
kleborate/truncation.py
|
Python
|
gpl-3.0
| 2,102
|
[
"Biopython"
] |
85eae26c95dd4a46b3181796b1c825a9ceb2b939d85d4c7951e21af49b94c3b3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
import os
import unittest
from pymatgen import Molecule
from pymatgen.util.testing import PymatgenTest
from collections import OrderedDict
from pymatgen.io.qchem_io.inputs import QCInput
__author__ = "Brandon Wood, Samuel Blau, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__email__ = "b.wood@berkeley.edu"
__credits__ = "Xiaohui Qu"
logger = logging.getLogger(__name__)
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "qchem")
class TestQCInput(PymatgenTest):
# def setUpClass(cls):
# add things that show up over and over again
def test_molecule_template(self):
species = ["C", "O"]
coords = [[-9.5782000000, 0.6241500000, 0.0000000000], [-7.5827400000, 0.5127000000, -0.0000000000]]
mol = Molecule(species=species, coords=coords)
molecule_test = QCInput.molecule_template(mol)
molecule_actual = """$molecule
0 1
C -9.5782000000 0.6241500000 0.0000000000
O -7.5827400000 0.5127000000 -0.0000000000
$end"""
self.assertEqual(molecule_actual, molecule_test)
def test_rem_template(self):
rem_params = {
"jobtype": "opt",
"method": "wB97M-V",
"basis": "def2-QZVPPD",
"max_scf_cycles": 300,
"gen_scfman": "true"
}
rem_test = QCInput.rem_template(rem_params)
rem_actual = """$rem
jobtype = opt
method = wB97M-V
basis = def2-QZVPPD
max_scf_cycles = 300
gen_scfman = true
$end"""
self.assertEqual(rem_actual, rem_test)
def test_opt_template(self):
opt_params = OrderedDict({
"CONSTRAINT": ["tors 2 3 4 5 25.0", "bend 2 1 4 110.0"],
"FIXED": ["x y 2 4 5"],
"DUMMY": ["M 2 3 4 5"],
"CONNECT": ["4 3 2 3 5 6"]
})
opt_test = QCInput.opt_template(opt_params)
opt_actual = """$opt
CONSTRAINT
tors 2 3 4 5 25.0
bend 2 1 4 110.0
ENDCONSTRAINT
FIXED
x y 2 4 5
ENDFIXED
DUMMY
M 2 3 4 5
ENDDUMMY
CONNECT
4 3 2 3 5 6
ENDCONNECT
$end"""
self.assertEqual(opt_actual, opt_test)
def test_find_sections(self):
str_single_job_input = """$molecule
0 1
S -0.00250959 -0.05817469 -0.02921636
C 1.70755408 -0.03033788 -0.01382912
H 2.24317221 -0.05215019 0.92026728
C 2.21976393 0.01718014 -1.27293235
H 3.27786220 0.04082146 -1.48539646
C 1.20867399 0.04478540 -2.27007793
H 1.40292257 0.10591684 -3.33110912
C -0.05341046 0.01577217 -1.74839343
C -1.32843436 0.03545064 -2.45531187
C -1.55195156 0.08743920 -3.80184635
H -0.75245172 0.10267657 -4.52817967
C -2.93293778 0.08408786 -4.13352169
H -3.31125108 0.11340328 -5.14405819
C -3.73173288 0.02741365 -3.03412864
H -4.80776535 0.00535688 -2.99564645
S -2.81590978 -0.00516172 -1.58990580
$end
$rem
jobtype = opt
method = wb97m-v
basis = def2-tzvppd
gen_scfman = true
geom_opt_max_cycles = 75
max_scf_cycles = 300
scf_algorithm = diis
scf_guess = sad
sym_ignore = true
symmetry = false
thresh = 14
$end
$opt
CONSTRAINT
tors 6 8 9 10 0.0
ENDCONSTRAINT
$end
"""
sections_test = QCInput.find_sections(str_single_job_input)
section_actual = ["molecule", "rem", "opt"]
self.assertEqual(section_actual, sections_test)
def test_read_molecule(self):
str_molecule = """$molecule
0 1
C -9.5782000000 0.6241500000 0.0000000000
O -7.5827400000 0.5127000000 -0.0000000000
$end"""
molecule_test = QCInput.read_molecule(str_molecule)
species = ["C", "O"]
coords = [[-9.5782000000, 0.6241500000, 0.0000000000],
[-7.5827400000, 0.5127000000, -0.0000000000]]
molecule_actual = Molecule(species, coords)
self.assertEqual(molecule_actual, molecule_test)
def test_read_rem(self):
str_rem = """Trying to break you!
$rem
jobtype opt
method wB97M-V
basis def2-QZVPPD
max_scf_cycles 300
gen_scfman = true
$end"""
rem_test = QCInput.read_rem(str_rem)
rem_actual = OrderedDict({
"jobtype": "opt",
"method": "wB97M-V",
"basis": "def2-QZVPPD",
"max_scf_cycles": "300",
"gen_scfman": "true"
})
self.assertDictEqual(rem_actual, rem_test)
def test_read_opt(self):
str_opt = """$opt
CONSTRAINT
tors 2 3 4 5 25.0
bend 2 1 4 110.0
ENDCONSTRAINT
FIXED
x y 2 4 5
ENDFIXED
DUMMY
M 2 3 4 5
ENDDUMMY
CONNECT
4 3 2 3 5 6
ENDCONNECT
$end"""
opt_test = QCInput.read_opt(str_opt)
opt_actual = OrderedDict({
"CONSTRAINT": ["tors 2 3 4 5 25.0", "bend 2 1 4 110.0"],
"FIXED": ["x y 2 4 5"],
"DUMMY": ["M 2 3 4 5"],
"CONNECT": ["4 3 2 3 5 6"]
})
self.assertDictEqual(opt_actual, opt_test)
def test__str__(self):
species = ["C", "O"]
coords = [[-9.5782000000, 0.6241500000, 0.0000000000],
[-7.5827400000, 0.5127000000, -0.0000000000]]
molecule = Molecule(species=species, coords=coords)
rem = OrderedDict({
"jobtype": "opt",
"method": "wB97M-V",
"basis": "def2-QZVPPD",
"max_scf_cycles": "300",
"gen_scfman": "true"
})
str_test = QCInput(molecule=molecule, rem=rem).__str__()
str_actual = """$molecule
0 1
C -9.5782000000 0.6241500000 0.0000000000
O -7.5827400000 0.5127000000 -0.0000000000
$end
$rem
jobtype = opt
method = wB97M-V
basis = def2-QZVPPD
max_scf_cycles = 300
gen_scfman = true
$end
"""
self.assertEqual(str_actual, str_test)
def test_from_string(self):
string = """$molecule
0 1
S -0.00250959 -0.05817469 -0.02921636
C 1.70755408 -0.03033788 -0.01382912
H 2.24317221 -0.05215019 0.92026728
C 2.21976393 0.01718014 -1.27293235
H 3.27786220 0.04082146 -1.48539646
C 1.20867399 0.04478540 -2.27007793
H 1.40292257 0.10591684 -3.33110912
C -0.05341046 0.01577217 -1.74839343
C -1.32843436 0.03545064 -2.45531187
C -1.55195156 0.08743920 -3.80184635
H -0.75245172 0.10267657 -4.52817967
C -2.93293778 0.08408786 -4.13352169
H -3.31125108 0.11340328 -5.14405819
C -3.73173288 0.02741365 -3.03412864
H -4.80776535 0.00535688 -2.99564645
S -2.81590978 -0.00516172 -1.58990580
$end
$rem
jobtype = opt
method = wb97m-v
basis = def2-tzvppd
gen_scfman = true
geom_opt_max_cycles = 75
max_scf_cycles = 300
scf_algorithm = diis
scf_guess = sad
sym_ignore = true
symmetry = false
thresh = 14
$end
$opt
CONSTRAINT
tors 6 8 9 10 0.0
ENDCONSTRAINT
$end
"""
qcinput_test = QCInput.from_string(string)
species = ["S", "C", "H", "C", "H", "C", "H", "C", "C", "C", "H", "C", "H", "C", "H", "S"]
coords = [[-0.00250959, -0.05817469, -0.02921636],
[1.70755408, -0.03033788, -0.01382912],
[2.24317221, -0.05215019, 0.92026728],
[2.21976393, 0.01718014, -1.27293235],
[3.27786220, 0.04082146, -1.48539646],
[1.20867399, 0.04478540, -2.27007793],
[1.40292257, 0.10591684, -3.33110912],
[-0.05341046, 0.01577217, -1.74839343],
[-1.32843436, 0.03545064, -2.45531187],
[-1.55195156, 0.08743920, -3.80184635],
[-0.75245172, 0.10267657, -4.52817967],
[-2.93293778, 0.08408786, -4.13352169],
[-3.31125108, 0.11340328, -5.14405819],
[-3.73173288, 0.02741365, -3.03412864],
[-4.80776535, 0.00535688, -2.99564645],
[-2.81590978, -0.00516172, -1.58990580]]
molecule_actual = Molecule(species, coords)
self.assertEqual(molecule_actual, qcinput_test.molecule)
rem_actual = OrderedDict({
"jobtype": "opt",
"method": "wb97m-v",
"basis": "def2-tzvppd",
"gen_scfman": "true",
"geom_opt_max_cycles": "75",
"max_scf_cycles": "300",
"scf_algorithm": "diis",
"scf_guess": "sad",
"sym_ignore": "true",
"symmetry": "false",
"thresh": "14"
})
self.assertDictEqual(rem_actual, qcinput_test.rem)
opt_actual = OrderedDict({"CONSTRAINT": ["tors 6 8 9 10 0.0"]})
self.assertDictEqual(opt_actual, qcinput_test.opt)
def test_multi_job_string(self):
species = ["S", "C", "H", "C", "H", "C", "H", "C", "C", "C", "H", "C", "H", "C", "H", "S"]
coords = [[-0.00250959, -0.05817469, -0.02921636],
[1.70755408, -0.03033788, -0.01382912],
[2.24317221, -0.05215019, 0.92026728],
[2.21976393, 0.01718014, -1.27293235],
[3.27786220, 0.04082146, -1.48539646],
[1.20867399, 0.04478540, -2.27007793],
[1.40292257, 0.10591684, -3.33110912],
[-0.05341046, 0.01577217, -1.74839343],
[-1.32843436, 0.03545064, -2.45531187],
[-1.55195156, 0.08743920, -3.80184635],
[-0.75245172, 0.10267657, -4.52817967],
[-2.93293778, 0.08408786, -4.13352169],
[-3.31125108, 0.11340328, -5.14405819],
[-3.73173288, 0.02741365, -3.03412864],
[-4.80776535, 0.00535688, -2.99564645],
[-2.81590978, -0.00516172, -1.58990580]]
molecule_1 = Molecule(species, coords)
rem_1 = OrderedDict({
"jobtype": "opt",
"method": "wb97m-v",
"basis": "def2-tzvppd",
"gen_scfman": "true",
"geom_opt_max_cycles": "75",
"max_scf_cycles": "300",
"scf_algorithm": "diis",
"scf_guess": "sad",
"sym_ignore": "true",
"symmetry": "false",
"thresh": "14"
})
opt_1 = OrderedDict({"CONSTRAINT": ["tors 6 8 9 10 0.0"]})
job_1 = QCInput(molecule=molecule_1, rem=rem_1, opt=opt_1)
molecule_2 = "read"
rem_2 = OrderedDict({
"jobtype": "sp",
"method": "wb97m-v",
"basis": "def2-tzvppd",
"gen_scfman": "true",
"geom_opt_max_cycles": "75",
"max_scf_cycles": "300",
"scf_algorithm": "diis",
"scf_guess": "read",
"sym_ignore": "true",
"symmetry": "false",
"thresh": "14"
})
job_2 = QCInput(molecule=molecule_2, rem=rem_2)
job_list = [job_1, job_2]
multi_job_str_test = QCInput.multi_job_string(job_list=job_list)
multi_job_str_actual = """$molecule
0 1
S -0.0025095900 -0.0581746900 -0.0292163600
C 1.7075540800 -0.0303378800 -0.0138291200
H 2.2431722100 -0.0521501900 0.9202672800
C 2.2197639300 0.0171801400 -1.2729323500
H 3.2778622000 0.0408214600 -1.4853964600
C 1.2086739900 0.0447854000 -2.2700779300
H 1.4029225700 0.1059168400 -3.3311091200
C -0.0534104600 0.0157721700 -1.7483934300
C -1.3284343600 0.0354506400 -2.4553118700
C -1.5519515600 0.0874392000 -3.8018463500
H -0.7524517200 0.1026765700 -4.5281796700
C -2.9329377800 0.0840878600 -4.1335216900
H -3.3112510800 0.1134032800 -5.1440581900
C -3.7317328800 0.0274136500 -3.0341286400
H -4.8077653500 0.0053568800 -2.9956464500
S -2.8159097800 -0.0051617200 -1.5899058000
$end
$rem
jobtype = opt
method = wb97m-v
basis = def2-tzvppd
gen_scfman = true
geom_opt_max_cycles = 75
max_scf_cycles = 300
scf_algorithm = diis
scf_guess = sad
sym_ignore = true
symmetry = false
thresh = 14
$end
$opt
CONSTRAINT
tors 6 8 9 10 0.0
ENDCONSTRAINT
$end
@@@
$molecule
read
$end
$rem
jobtype = sp
method = wb97m-v
basis = def2-tzvppd
gen_scfman = true
geom_opt_max_cycles = 75
max_scf_cycles = 300
scf_algorithm = diis
scf_guess = read
sym_ignore = true
symmetry = false
thresh = 14
$end
"""
self.assertEqual(multi_job_str_actual, multi_job_str_test)
def test_from_multi_jobs_file(self):
job_list_test = QCInput.from_multi_jobs_file(os.path.join(test_dir, "pt_n2_wb97mv_0.0.in"))
species = ["S", "C", "H", "C", "H", "C", "H", "C", "C", "C", "H", "C", "H", "C", "H", "S"]
coords = [[-0.00250959, -0.05817469, -0.02921636],
[1.70755408, -0.03033788, -0.01382912],
[2.24317221, -0.05215019, 0.92026728],
[2.21976393, 0.01718014, -1.27293235],
[3.27786220, 0.04082146, -1.48539646],
[1.20867399, 0.04478540, -2.27007793],
[1.40292257, 0.10591684, -3.33110912],
[-0.05341046, 0.01577217, -1.74839343],
[-1.32843436, 0.03545064, -2.45531187],
[-1.55195156, 0.08743920, -3.80184635],
[-0.75245172, 0.10267657, -4.52817967],
[-2.93293778, 0.08408786, -4.13352169],
[-3.31125108, 0.11340328, -5.14405819],
[-3.73173288, 0.02741365, -3.03412864],
[-4.80776535, 0.00535688, -2.99564645],
[-2.81590978, -0.00516172, -1.58990580]]
molecule_1_actual = Molecule(species, coords)
rem_1_actual = OrderedDict({
"jobtype": "opt",
"method": "wb97m-v",
"basis": "def2-tzvppd",
"gen_scfman": "true",
"geom_opt_max_cycles": "75",
"max_scf_cycles": "300",
"scf_algorithm": "diis",
"scf_guess": "sad",
"sym_ignore": "true",
"symmetry": "false",
"thresh": "14"
})
opt_1_actual = OrderedDict({"CONSTRAINT": ["tors 6 8 9 10 0.0"]})
self.assertEqual(molecule_1_actual, job_list_test[0].molecule)
self.assertEqual(rem_1_actual, job_list_test[0].rem)
self.assertEqual(opt_1_actual, job_list_test[0].opt)
molecule_2_actual = "read"
rem_2_actual = OrderedDict({
"jobtype": "sp",
"method": "wb97m-v",
"basis": "def2-tzvppd",
"gen_scfman": "true",
"geom_opt_max_cycles": "75",
"max_scf_cycles": "300",
"scf_algorithm": "diis",
"scf_guess": "read",
"sym_ignore": "true",
"symmetry": "false",
"thresh": "14"
})
self.assertEqual(molecule_2_actual, job_list_test[1].molecule)
self.assertEqual(rem_2_actual, job_list_test[1].rem)
if __name__ == "__main__":
unittest.main()
|
czhengsci/pymatgen
|
pymatgen/io/qchem_io/tests/test_inputs.py
|
Python
|
mit
| 16,019
|
[
"pymatgen"
] |
c0cf2dc2fd248f20197d8daa6d7a93d45490bd026aea47147b72ef0eca29fe24
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from SceneObject import SceneObject
class Axes(SceneObject):
'''
A template for drawing axes.
Shouldn't really be in a class of its own, but it's cleaner here and like this we can move it easily.
Ref: http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/GUI/Tcl/ProbeWithSplineWidget.tcl
'''
def __init__(self, renderers):
'''
Initialize the axes - not the parent version, we're going to assign a vtkAxesActor to it and add it ourselves.
'''
# Skip the parent constructor
#super(Axes,self).__init__(renderer)
# Ref: http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/GUI/Tcl/ProbeWithSplineWidget.tcl
self.vtkActor = vtk.vtkAxesActor()
self.vtkActor.SetShaftTypeToCylinder()
self.vtkActor.SetCylinderRadius(0.05)
self.vtkActor.SetTotalLength(2.5, 2.5, 2.5)
# Change the font size to something reasonable
# Ref: http://vtk.1045678.n5.nabble.com/VtkAxesActor-Problem-td4311250.html
self.vtkActor.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
self.vtkActor.GetXAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
self.vtkActor.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
self.vtkActor.GetYAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
self.vtkActor.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleMode(vtk.vtkTextActor.TEXT_SCALE_MODE_NONE)
self.vtkActor.GetZAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(25)
# Add the actor.
for item in renderers:
item.AddActor(self.vtkActor)
#renderer.AddActor(self.vtkActor)
|
GearsAD/semisorted_arnerve
|
arnerve/scene/Axes.py
|
Python
|
mit
| 1,876
|
[
"VTK"
] |
e8318534d6a284c0cffd8705a65d73cf23eb09def6406dce9c4e47905f6ae23d
|
import pygame
import random
import json
from pgu import gui
from time import sleep
import ctypes
class Inquiry:
''' Class used for drawing the Inquiry UI.
@untestable - Just draws UI elements onscreen, makes no sense to test.
'''
def __init__(self, screen, config, project, site):
self.config = config
self.project = project
self.screen = screen
self.app = gui.App()
self.app.connect(gui.QUIT, self.app.quit, None)
self.contain = gui.Container(width=self.config["screenX"],
height=self.config["screenY"])
self.inquiry_site = site
self.inquiry_type = None
self.firstDraw = True
self.firstOptions = True
self.firstScroll = True
def choose_inquiry_site(self,site):
'''
Changes active site when an site is chosen to be inquired.
@untestable - function manipulates user interface, makes no sense to test.
'''
self.inquiry_site = site
self.inquiry_type = None
self.firstScroll = True
if self.contain.find("report_details"):
self.contain.remove(self.contain.find("report_details"))
def do_inquiry(self,inquiry_type):
'''
Sets the type of inquiry in the UI.
@untestable - function manipulates user interface, makes no sense to test.
'''
self.inquiry_type = inquiry_type
self.firstScroll = True
if self.contain.find("report_details"):
self.contain.remove(self.contain.find("report_details"))
def refresh_screen(self):
'''
Refreshes the inquiry interface screen.
@untestable - Just draws UI elements onscreen, makes no sense to test.
'''
self.app.paint(self.screen)
self.app.update(self.screen)
#Attempt at removing thread crashing issue
# try:
# x11 = ctypes.cdll.LoadLibrary('libX11.so')
# x11.XInitThreads()
# print "XInitThreads"
# except:
# pass
pygame.display.update()
def draw_inquiry(self):
'''
Draws details of an inquiry onscreen.
@untestable - Just draws UI elements onscreen, makes no sense to test.
'''
pygame.draw.rect(self.screen, 0xFAFCA4,
(100,20,650,410))
info_x = 150
font = pygame.font.SysFont("Helvetica", 22)
smallfont = pygame.font.SysFont("Helvetica", 18)
label = smallfont.render( "Press Enter to close this window", 1, (0, 0, 0))
self.screen.blit(label, (info_x, 400))
if self.inquiry_site:
y_offset = 50
font = pygame.font.SysFont("Helvetica", 24)
bellerose_font = pygame.font.Font(self.config["bellerose_font"], 40)
label = bellerose_font.render("Inquiries - {}".format(self.inquiry_site.name)
, 1, (0, 0, 0))
#Centering
name_length = len("Inquiries - {}".format(self.inquiry_site.name))
name_length = name_length*10
self.screen.blit(label, (500 - name_length , y_offset - 50))
y_offset += 30
if self.firstOptions:
button = gui.Button('Send "are you on schedule?" email')
button.connect(gui.CLICK, self.do_inquiry,"on_schedule")
self.contain.add(button, info_x, y_offset)
font = pygame.font.SysFont("Helvetica", 16)
label = font.render("0 Working Days"
, 1, (0, 0, 0))
self.screen.blit(label, (info_x + 365 + 60, y_offset))
y_offset += 20
if self.firstOptions:
button = gui.Button('Send "please report status of modules" email')
button.connect(gui.CLICK, self.do_inquiry,"status")
self.contain.add(button, info_x, y_offset)
label = font.render("0.1 Working Days"
, 1, (0, 0, 0))
self.screen.blit(label, (info_x + 365 + 60, y_offset))
y_offset += 20
if self.firstOptions:
button = gui.Button('Send "please list completed tasks" email')
button.connect(gui.CLICK, self.do_inquiry,"list_c_tasks")
self.contain.add(button, info_x, y_offset)
label = font.render("0.5 Working Days"
, 1, (0, 0, 0))
self.screen.blit(label, (info_x + 365 + 60, y_offset))
y_offset += 20
if self.firstOptions:
button = gui.Button('Hold video conference')
button.connect(gui.CLICK, self.do_inquiry,"video_conf")
self.contain.add(button, info_x, y_offset)
label = font.render("2 Working Days"
, 1, (0, 0, 0))
self.screen.blit(label, (info_x + 365 + 60, y_offset))
y_offset += 20
if self.firstOptions:
button = gui.Button('Make site visit')
button.connect(gui.CLICK, self.do_inquiry,"visit")
self.contain.add(button, info_x, y_offset)
label = font.render("7 Working Days"
, 1, (0, 0, 0))
self.screen.blit(label, (info_x + 365 + 60, y_offset))
if self.firstOptions:
# Make sure the buttons aren't added again next time
self.firstOptions = False
self.app.init(self.contain)
hel_font = pygame.font.SysFont("Helvetica", 12)
if self.inquiry_type:
inquiry_result = []
if self.firstScroll:
self.firstScroll = False
my_list = gui.List(width=560,height=200,name="report_details")
inquiry_result.append(gui.Label("Inquiry Results:"))
for team in self.inquiry_site.teams:
inquiry_result.append(gui.Label("Team " + team.name))
#Are you on Schedule?
if self.inquiry_type == "on_schedule":
if not team.module:
inquiry_result.append(gui.Label("We aren't working on anything at the moment" ))
else:
if self.inquiry_site.culture[0] == 0:
inquiry_result.append(gui.Label("Yes, We are on schedule."))
else:
if team.module.is_on_time:
inquiry_result.append(gui.Label("Yes, We are on schedule."))
else:
inquiry_result.append(gui.Label("No, We are not on schedule."))
#What is your status?
if self.inquiry_type == "status":
if not team.module:
inquiry_result.append(gui.Label("We aren't working on anything at the moment" ))
else:
team.module.actual_cost = team.module.actual_cost + 1
if self.inquiry_site.culture[0] == 0:
inquiry_result.append(gui.Label( "We are on schedule."))
else:
if team.module.is_on_time:
inquiry_result.append(gui.Label("We are on schedule."))
else:
inquiry_result.append(gui.Label("We are delayed & experiencing " + str(len(team.module.problems_occured)) + " problems." ))
# #Problems
# inquiry_result.append(gui.Label("Problems:"))
# for prob in team.module.problems_occured:
# inquiry_result.append(gui.Label(prob))
#List your completed tasks.
if self.inquiry_type == "list_c_tasks":
inquiry_result.append(gui.Label("Completed Tasks:"))
#Completed Modules.
for module in team.completed_modules:
for task in module.completed_tasks:
inquiry_result.append(gui.Label(module.name + " - " + task.name))
#Completed Tasks of the Current Module
if not team.module:
inquiry_result.append(gui.Label("We are not working on a module at the moment."))
else:
team.module.actual_cost = team.module.actual_cost + 4
if len(team.module.completed_tasks) == 0:
inquiry_result.append(gui.Label("We have not completed any tasks."))
else:
for task in team.module.completed_tasks:
inquiry_result.append(gui.Label(team.module.name + " - " + task.name))
#Host Video Conference
if self.inquiry_type == "video_conf":
#Completed Modules
inquiry_result.append(gui.Label("Completed Tasks:"))
for module in team.completed_modules:
for task in module.completed_tasks:
inquiry_result.append(gui.Label(module.name + " - " + task.name))
#Completed Tasks of the Current Module
if not team.module:
inquiry_result.append(gui.Label("We are not working on a module at the moment."))
else:
team.module.actual_cost = team.module.actual_cost + 16
if len(team.module.completed_tasks) == 0:
inquiry_result.append(gui.Label("We have not completed any tasks."))
else:
for task in team.module.completed_tasks:
inquiry_result.append(gui.Label(team.module.name + " - " + task.name))
#Current Task & If we are on schedule for it.
#Dishonest Culture
if self.inquiry_site.culture[0] == 0:
if random.randint(0,1) == 0:
#50% chance of continuing to lie
inquiry_result.append(gui.Label("We are on schedule for the current task: " + team.module.name + " - " + team.module.tasks[0].name))
else:
if team.module.is_on_time:
inquiry_result.append(gui.Label("We are on schedule for the current task: " + team.module.name + " - " + team.module.tasks[0].name))
else:
inquiry_result.append(gui.Label("We are delayed for the current task: " + team.module.name + " - " + team.module.tasks[0].name +" & experiencing " + str(len(team.module.problems_occured)) + " problems." ))
#Problems
inquiry_result.append(gui.Label("Problems for module " + team.module.name + ":"))
for prob in team.module.problems_occured:
inquiry_result.append(gui.Label(prob))
#Honest Culture
else:
if team.module.is_on_time:
inquiry_result.append(gui.Label("We are on schedule for the current task: " + team.module.name + " - " + team.module.tasks[0].name))
else:
inquiry_result.append(gui.Label("We are delayed for the current task: " + team.module.name + " - " + team.module.tasks[0].name +" & experiencing " + str(len(team.module.problems_occured)) + " problems." ))
#Problems
inquiry_result.append(gui.Label("Problems for module " + team.module.name + ":"))
for prob in team.module.problems_occured:
inquiry_result.append(gui.Label(prob))
if self.inquiry_type == "visit":
inquiry_result.append(gui.Label("Completed Tasks:"))
for module in team.completed_modules:
for task in module.completed_tasks:
inquiry_result.append(gui.Label(module.name + " - " + task.name))
if not team.module:
inquiry_result.append(gui.Label("We are not working on a module at the moment."))
else:
team.module.actual_cost = team.module.actual_cost + 56
if len(team.module.completed_tasks) == 0:
inquiry_result.append(gui.Label("We have not completed any tasks."))
else:
for task in team.module.completed_tasks:
inquiry_result.append(gui.Label(team.module.name + " - " + task.name))
if team.module.is_on_time:
inquiry_result.append(gui.Label("On schedule for the current task - " + team.module.name))
else:
inquiry_result.append(gui.Label("We are delayed & experiencing " + str(len(team.module.problems_occured)) + " problems." ))
#Problems
inquiry_result.append(gui.Label("Problems for module " + team.module.name + ":"))
for prob in team.module.problems_occured:
inquiry_result.append(gui.Label(prob))
for label in inquiry_result:
label.set_font(hel_font)
my_list.add(label)
self.contain.add(my_list,info_x,y_offset+30)
self.app.init(self.contain)
def draw(self):
'''
Draws UI for inquiry interface.
The parent draw function of the end game screen.
@untestable - Just draws UI elements onscreen, makes no sense to test.
'''
self.draw_inquiry()
self.refresh_screen()
# sleep(self.config["ui_refresh_period_seconds"])
|
ianfhunter/TeamCrab
|
src/UI/inquiry.py
|
Python
|
mit
| 15,299
|
[
"VisIt"
] |
cab6082f2f9370ed85d30451ed512977d264568064801c0031e5a2a3fe179509
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Timothy Berkelbach <tim.berkelbach@gmail.com>
# Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy as np
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import dft as pbcdft
L = 4.
cell = pbcgto.Cell()
cell.verbose = 0
cell.a = np.eye(3)*L
cell.atom =[['He' , ( L/2+0., L/2+0. , L/2+1.)],]
cell.basis = {'He': [[0, (4.0, 1.0)], [0, (1.0, 1.0)]]}
cell.build()
def build_cell(mesh):
cell = pbcgto.Cell()
cell.unit = 'A'
cell.a = '''3.5668 0. 0.
0. 3.5668 0.
0. 0. 3.5668'''
cell.mesh = mesh
cell.atom ='''
C, 0., 0., 0.
C, 0.8917, 0.8917, 0.8917
C, 1.7834, 1.7834, 0.
C, 2.6751, 2.6751, 0.8917
C, 1.7834, 0. , 1.7834
C, 2.6751, 0.8917, 2.6751
C, 0. , 1.7834, 1.7834
C, 0.8917, 2.6751, 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 7
cell.output = '/dev/null'
cell.build()
return cell
def make_primitive_cell(mesh):
cell = pbcgto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.mesh = mesh
cell.verbose = 7
cell.output = '/dev/null'
cell.build()
return cell
def tearDownModule():
global cell
del cell
class KnownValues(unittest.TestCase):
def test_klda8_cubic_gamma(self):
cell = build_cell([17]*3)
mf = pbcdft.RKS(cell)
mf.xc = 'lda,vwn'
#kmf.verbose = 7
e1 = mf.scf()
self.assertAlmostEqual(e1, -44.892502703975893, 8)
def test_klda8_cubic_kpt_222(self):
cell = build_cell([17]*3)
abs_kpts = cell.make_kpts([2]*3, with_gamma_point=False)
mf = pbcdft.KRKS(cell, abs_kpts)
#mf.analytic_int = False
mf.xc = 'lda,vwn'
#mf.verbose = 7
e1 = mf.scf()
self.assertAlmostEqual(e1, -45.425834895129569, 8)
def test_klda8_primitive_gamma(self):
cell = make_primitive_cell([17]*3)
mf = pbcdft.RKS(cell)
mf.xc = 'lda,vwn'
#kmf.verbose = 7
mf.conv_tol = 1e-8
e1 = mf.scf()
self.assertAlmostEqual(e1, -10.221426445656439, 8)
def test_klda8_primitive_kpt_222(self):
cell = make_primitive_cell([17]*3)
abs_kpts = cell.make_kpts([2]*3, with_gamma_point=False)
mf = pbcdft.KRKS(cell, abs_kpts)
#mf.analytic_int = False
mf.xc = 'lda,vwn'
#mf.verbose = 7
e1 = mf.scf()
self.assertAlmostEqual(e1, -11.353643583707452, 8)
def test_rsh_df(self):
mf = pbcdft.KRKS(cell).density_fit()
mf.xc = 'camb3lyp'
mf.omega = .15
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -2.399571378419408, 7)
# TODO: test the reset method of pbcdft.KRKS and pbcdft.RKS, i.e. whether the
# reset methods of all dependent objects are called
if __name__ == '__main__':
print("Full Tests for pbc.dft.krks")
unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/dft/test/test_krks.py
|
Python
|
apache-2.0
| 3,754
|
[
"PySCF"
] |
8fbd2a7b8db46eb780ad60f57e41c75c77212a6b5074035bc2a6ad95b29f1b39
|
from openmesh import TriMesh
import projectconfig
import unittest
from graphicfile import Graphic_File
import customstructures
import time
import abstract_test
class TestVtkMethods(abstract_test.AbstractTest):
def setUp(self):
gf = Graphic_File()
self.mesh = gf.read_stl(projectconfig.TMP_PATH + "cube.vtk")
|
bajorekp/modelowanie_w_grafice
|
test/vtk_test.py
|
Python
|
mit
| 331
|
[
"VTK"
] |
30337907ffaf1715d573288336c2d9d273e4dea8ed77e42e50d44b5d09ca2bf1
|
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import assert_almost_equal, assert_, assert_raises, \
assert_array_almost_equal, assert_array_almost_equal_nulp, run_module_suite
def test_kde_1d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
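# The assertion below bounds the integrated squared error between the KDE
# and the true normal density, approximated by a Riemann sum over the grid.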
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_2d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
# Need transpose (shape (2, 500)) for kde
xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
x, y = np.mgrid[-7:7:500j, -7:7:500j]
grid_coords = np.vstack([x.ravel(), y.ravel()])
kdepdf = gkde.evaluate(grid_coords)
kdepdf = kdepdf.reshape(500, 500)
normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
intervall = y.ravel()[1] - y.ravel()[0]
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
small = -1e100
large = 1e100
prob1 = gkde.integrate_box([small, mean[1]], [large, large])
prob2 = gkde.integrate_box([small, small], [large, mean[1]])
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*(intervall**2), decimal=2)
assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
(kdepdf*normpdf).sum()*(intervall**2), decimal=2)
def test_kde_bandwidth_method():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.n, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
* self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3
kde3 = _kde_subclass3(x1, kde.covariance)
y3 = kde3(xs)
assert_array_almost_equal_nulp(ys, y3, nulp=10)
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
# Set the bandwidth, then reset it to the default.
kde = stats.gaussian_kde(x1)
kde.set_bandwidth(bw_method=0.5)
kde.set_bandwidth(bw_method='scott')
y2 = kde(xs)
assert_array_almost_equal(y_expected, y2, decimal=7)
def test_gaussian_kde_monkeypatch():
"""Ugly, but people may rely on this. See scipy pull request 123,
specifically the linked ML thread "Width of the Gaussian in stats.kde".
If it is necessary to break this later on, that is to be discussed on ML.
"""
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# The old monkeypatched version to get at Silverman's Rule.
kde = stats.gaussian_kde(x1)
kde.covariance_factor = kde.silverman_factor
kde._compute_covariance()
y1 = kde(xs)
# The new saner version.
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
y2 = kde2(xs)
assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
"""Regression test for #1181."""
x1 = np.arange(5)
kde = stats.gaussian_kde(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_pdf_logpdf():
np.random.seed(1)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
xs = np.linspace(-15, 12, 25)
pdf = gkde.evaluate(xs)
pdf2 = gkde.pdf(xs)
assert_almost_equal(pdf, pdf2, decimal=12)
logpdf = np.log(pdf)
logpdf2 = gkde.logpdf(xs)
assert_almost_equal(logpdf, logpdf2, decimal=12)
# There are more points than data
gkde = stats.gaussian_kde(xs)
pdf = np.log(gkde.evaluate(xn))
pdf2 = gkde.logpdf(xn)
assert_almost_equal(pdf, pdf2, decimal=12)
if __name__ == "__main__":
run_module_suite()
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.py | Python | mit | 7,964 | ["Gaussian"] | deb1f99c87325e936a4de88f1982b0417410bfce12e52ae6be50694554d46506 |
import logging
import os
import salt.modules.cmdmod as cmdmod
import salt.modules.pkg_resource as pkg_resource
import salt.modules.rpm_lowpkg as rpm
import salt.modules.yumpkg as yumpkg
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
from tests.support.mock import MagicMock, Mock, call, patch
try:
import pytest
except ImportError:
pytest = None
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def list_repos_var():
return {
"base": {
"file": "/etc/yum.repos.d/CentOS-Base.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra",
"name": "CentOS-$releasever - Base",
},
"base-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"enabled": "0",
"file": "/etc/yum.repos.d/CentOS-Sources.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
},
"updates": {
"file": "/etc/yum.repos.d/CentOS-Base.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"mirrorlist": "http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra",
"name": "CentOS-$releasever - Updates",
},
"updates-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/updates/Source/",
"enabled": "0",
"file": "/etc/yum.repos.d/CentOS-Sources.repo",
"gpgcheck": "1",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Updates Sources",
},
}
@pytest.fixture
def configure_loader_modules():
return {
yumpkg: {
"__context__": {"yum_bin": "yum"},
"__grains__": {
"osarch": "x86_64",
"os": "CentOS",
"os_family": "RedHat",
"osmajorrelease": 7,
},
},
pkg_resource: {},
}
def test_list_pkgs():
"""
Test packages listing.
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
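    # Mock rpm query output. The '_|-'-separated fields are name, epoch,
    # version, release, arch, a placeholder, and install time (epoch seconds).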
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(versions_as_list=True)
for pkg_name, pkg_version in {
"python-urlgrabber": "3.10-8.el7",
"alsa-lib": "1.1.1-1.el7",
"gnupg2": "2.0.22-4.el7",
"rpm-python": "4.11.3-21.el7",
"pygpgme": "0.3-9.el7",
"yum": "3.4.3-150.el7.centos",
"lzo": "2.06-8.el7",
"qrencode-libs": "3.4.1-3.el7",
"ustr": "1.0.4-16.el7",
"shadow-utils": "2:4.1.5.1-24.el7",
"util-linux": "2.23.2-33.el7",
"openssh": "6.6.1p1-33.el7_3",
"virt-what": "1.13-8.el7",
}.items():
assert pkgs.get(pkg_name) is not None
assert pkgs[pkg_name] == [pkg_version]
def test_list_pkgs_no_context():
"""
Test packages listing.
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
), patch.object(
yumpkg, "_list_pkgs_from_context"
) as list_pkgs_context_mock:
pkgs = yumpkg.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
pkgs = yumpkg.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
def test_list_pkgs_with_attr():
"""
Test packages listing with the attr parameter
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471",
"alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475",
"gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477",
"rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477",
"pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478",
"yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479",
"lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479",
"qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480",
"ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480",
"shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481",
"util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484",
"openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(
attr=["epoch", "release", "arch", "install_date_time_t"]
)
for pkg_name, pkg_attr in {
"python-urlgrabber": {
"version": "3.10",
"release": "8.el7",
"arch": "noarch",
"install_date_time_t": 1487838471,
"epoch": None,
},
"alsa-lib": {
"version": "1.1.1",
"release": "1.el7",
"arch": "x86_64",
"install_date_time_t": 1487838475,
"epoch": None,
},
"gnupg2": {
"version": "2.0.22",
"release": "4.el7",
"arch": "x86_64",
"install_date_time_t": 1487838477,
"epoch": None,
},
"rpm-python": {
"version": "4.11.3",
"release": "21.el7",
"arch": "x86_64",
"install_date_time_t": 1487838477,
"epoch": None,
},
"pygpgme": {
"version": "0.3",
"release": "9.el7",
"arch": "x86_64",
"install_date_time_t": 1487838478,
"epoch": None,
},
"yum": {
"version": "3.4.3",
"release": "150.el7.centos",
"arch": "noarch",
"install_date_time_t": 1487838479,
"epoch": None,
},
"lzo": {
"version": "2.06",
"release": "8.el7",
"arch": "x86_64",
"install_date_time_t": 1487838479,
"epoch": None,
},
"qrencode-libs": {
"version": "3.4.1",
"release": "3.el7",
"arch": "x86_64",
"install_date_time_t": 1487838480,
"epoch": None,
},
"ustr": {
"version": "1.0.4",
"release": "16.el7",
"arch": "x86_64",
"install_date_time_t": 1487838480,
"epoch": None,
},
"shadow-utils": {
"epoch": "2",
"version": "4.1.5.1",
"release": "24.el7",
"arch": "x86_64",
"install_date_time_t": 1487838481,
},
"util-linux": {
"version": "2.23.2",
"release": "33.el7",
"arch": "x86_64",
"install_date_time_t": 1487838484,
"epoch": None,
},
"openssh": {
"version": "6.6.1p1",
"release": "33.el7_3",
"arch": "x86_64",
"install_date_time_t": 1487838485,
"epoch": None,
},
"virt-what": {
"version": "1.13",
"release": "8.el7",
"install_date_time_t": 1487838486,
"arch": "x86_64",
"epoch": None,
},
}.items():
assert pkgs.get(pkg_name) is not None
assert pkgs[pkg_name] == [pkg_attr]
def test_list_pkgs_with_attr_multiple_versions():
"""
Test packages listing with the attr parameter reporting multiple version installed
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"glibc_|-(none)_|-2.12_|-1.212.el6_|-i686_|-(none)_|-1542394210"
"glibc_|-(none)_|-2.12_|-1.212.el6_|-x86_64_|-(none)_|-1542394204",
"virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486",
"virt-what_|-(none)_|-1.10_|-2.el7_|-x86_64_|-(none)_|-1387838486",
]
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(yumpkg.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
yumpkg.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
yumpkg.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.dict(
pkg_resource.__salt__, {"pkg.parse_arch": yumpkg.parse_arch}
):
pkgs = yumpkg.list_pkgs(
attr=["epoch", "release", "arch", "install_date_time_t"]
)
expected_pkg_list = {
"glibc": [
{
"version": "2.12",
"release": "1.212.el6",
"install_date_time_t": 1542394210,
"arch": "i686",
"epoch": None,
},
{
"version": "2.12",
"release": "1.212.el6",
"install_date_time_t": 1542394204,
"arch": "x86_64",
"epoch": None,
},
],
"virt-what": [
{
"version": "1.10",
"release": "2.el7",
"install_date_time_t": 1387838486,
"arch": "x86_64",
"epoch": None,
},
{
"version": "1.13",
"release": "8.el7",
"install_date_time_t": 1487838486,
"arch": "x86_64",
"epoch": None,
},
],
}
for pkgname, pkginfo in pkgs.items():
assert pkginfo == expected_pkg_list[pkgname]
assert len(pkginfo) == len(expected_pkg_list[pkgname])
def test_list_patches():
"""
Test patches listing.
:return:
"""
yum_out = [
"i my-fake-patch-not-installed-1234 recommended "
" spacewalk-usix-2.7.5.2-2.2.noarch",
" my-fake-patch-not-installed-1234 recommended "
" spacewalksd-5.0.26.2-21.2.x86_64",
"i my-fake-patch-not-installed-1234 recommended "
" suseRegisterInfo-3.1.1-18.2.x86_64",
"i my-fake-patch-installed-1234 recommended "
" my-package-one-1.1-0.1.x86_64",
"i my-fake-patch-installed-1234 recommended "
" my-package-two-1.1-0.1.x86_64",
]
expected_patches = {
"my-fake-patch-not-installed-1234": {
"installed": False,
"summary": [
"spacewalk-usix-2.7.5.2-2.2.noarch",
"spacewalksd-5.0.26.2-21.2.x86_64",
"suseRegisterInfo-3.1.1-18.2.x86_64",
],
},
"my-fake-patch-installed-1234": {
"installed": True,
"summary": [
"my-package-one-1.1-0.1.x86_64",
"my-package-two-1.1-0.1.x86_64",
],
},
}
with patch.dict(yumpkg.__grains__, {"osarch": "x86_64"}), patch.dict(
yumpkg.__salt__,
{"cmd.run_stdout": MagicMock(return_value=os.linesep.join(yum_out))},
):
patches = yumpkg.list_patches()
assert patches["my-fake-patch-not-installed-1234"]["installed"] is False
assert len(patches["my-fake-patch-not-installed-1234"]["summary"]) == 3
for _patch in expected_patches["my-fake-patch-not-installed-1234"]["summary"]:
assert _patch in patches["my-fake-patch-not-installed-1234"]["summary"]
assert patches["my-fake-patch-installed-1234"]["installed"] is True
assert len(patches["my-fake-patch-installed-1234"]["summary"]) == 2
for _patch in expected_patches["my-fake-patch-installed-1234"]["summary"]:
assert _patch in patches["my-fake-patch-installed-1234"]["summary"]
def test_latest_version_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.latest_version("foo", refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.latest_version(
"foo",
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
        # without fromrepo, but within a systemd scope
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch("salt.utils.systemd.has_scope", MagicMock(return_value=True)):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=True)},
):
yumpkg.latest_version(
"foo",
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
cmd.assert_called_once_with(
[
"systemd-run",
"--scope",
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"available",
"foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
def test_list_repo_pkgs_with_options(list_repos_var):
"""
Test list_repo_pkgs with and without fromrepo
NOTE: mock_calls is a stack. The most recent call is indexed
with 0, while the first call would have the highest index.
"""
really_old_yum = MagicMock(return_value="3.2.0")
older_yum = MagicMock(return_value="3.4.0")
newer_yum = MagicMock(return_value="3.4.5")
list_repos_mock = MagicMock(return_value=list_repos_var)
kwargs = {
"output_loglevel": "trace",
"ignore_retcode": True,
"python_shell": False,
"env": {},
}
with patch.object(yumpkg, "list_repos", list_repos_mock):
# Test with really old yum. The fromrepo argument has no effect on
# the yum commands we'd run.
with patch.dict(yumpkg.__salt__, {"cmd.run": really_old_yum}):
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo")
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
# Check args from first call
assert cmd.mock_calls[1][1] == (
["yum", "--quiet", "list", "available"],
)
# Check kwargs from first call
assert cmd.mock_calls[1][2] == kwargs
# Check args from second call
assert cmd.mock_calls[0][1] == (
["yum", "--quiet", "list", "installed"],
)
# Check kwargs from second call
assert cmd.mock_calls[0][2] == kwargs
        # Test with older yum (3.4.x). The fromrepo argument still has no
        # effect, but --showduplicates is now supported.
with patch.dict(yumpkg.__salt__, {"cmd.run": older_yum}):
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo")
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
# Check args from first call
assert cmd.mock_calls[1][1] == (
["yum", "--quiet", "--showduplicates", "list", "available"],
)
# Check kwargs from first call
assert cmd.mock_calls[1][2] == kwargs
# Check args from second call
assert cmd.mock_calls[0][1] == (
["yum", "--quiet", "--showduplicates", "list", "installed"],
)
# Check kwargs from second call
assert cmd.mock_calls[0][2] == kwargs
# Test with newer yum. We should run one yum command per repo, so
# fromrepo would limit how many calls we make.
with patch.dict(yumpkg.__salt__, {"cmd.run": newer_yum}):
# When fromrepo is used, we would only run one yum command, for
# that specific repo.
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs("foo", fromrepo="base")
# We should have called cmd.run_all once
assert len(cmd.mock_calls) == 1
# Check args
assert cmd.mock_calls[0][1] == (
[
"yum",
"--quiet",
"--showduplicates",
"repository-packages",
"base",
"list",
"foo",
],
)
# Check kwargs
assert cmd.mock_calls[0][2] == kwargs
            # Test enabling base-source and disabling updates. We should
            # get two calls, one per enabled repo. Dict iteration order can
            # vary across Python versions, which is OK; it just means we
            # have to check both the first and second mock call for each repo.
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_repo_pkgs(
"foo", enablerepo="base-source", disablerepo="updates"
)
# We should have called cmd.run_all twice
assert len(cmd.mock_calls) == 2
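            # Either call order is acceptable: try both indices, break on a
            # match, and only fail (via the for-else) if neither matched.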
for repo in ("base", "base-source"):
for index in (0, 1):
try:
# Check args
assert cmd.mock_calls[index][1] == (
[
"yum",
"--quiet",
"--showduplicates",
"repository-packages",
repo,
"list",
"foo",
],
)
# Check kwargs
assert cmd.mock_calls[index][2] == kwargs
break
except AssertionError:
continue
else:
pytest.fail("repo '{}' not checked".format(repo))
def test_list_upgrades_dnf():
"""
The subcommand should be "upgrades" with dnf
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"dnf",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"upgrades",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(
refresh=False, enablerepo="good", disablerepo="bad", branch="foo"
)
cmd.assert_called_once_with(
[
"dnf",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"upgrades",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_list_upgrades_yum():
"""
The subcommand should be "updates" with yum
"""
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(refresh=False, fromrepo="good", branch="foo")
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"list",
"updates",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0, "stdout": ""})
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd, "config.get": MagicMock(return_value=False)},
):
yumpkg.list_upgrades(
refresh=False, enablerepo="good", disablerepo="bad", branch="foo"
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"list",
"updates",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_refresh_db_with_options():
with patch("salt.utils.pkg.clear_rtag", Mock()):
        # With check_update=True we make two cmd.run_all calls: one for the
        # clean command, then a second one to check for updates.
# with fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(check_update=True, fromrepo="good", branch="foo")
assert yum_call.call_count == 2
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"check-update",
"--setopt=autocheck_running_kernel=false",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
output_loglevel="trace",
env={},
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(
check_update=True,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
assert yum_call.call_count == 2
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
env={},
ignore_retcode=True,
output_loglevel="trace",
python_shell=False,
)
yum_call.assert_any_call(
[
"yum",
"--quiet",
"--assumeyes",
"check-update",
"--setopt=autocheck_running_kernel=false",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
output_loglevel="trace",
env={},
ignore_retcode=True,
python_shell=False,
)
        # With check_update=False we make just one cmd.run_all call, for the clean command
# with fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(check_update=False, fromrepo="good", branch="foo")
assert yum_call.call_count == 1
yum_call.assert_called_once_with(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
# without fromrepo
yum_call = MagicMock()
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": yum_call, "config.get": MagicMock(return_value=False)},
):
yumpkg.refresh_db(
check_update=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
)
assert yum_call.call_count == 1
yum_call.assert_called_once_with(
[
"yum",
"--quiet",
"--assumeyes",
"clean",
"expire-cache",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
],
env={},
output_loglevel="trace",
ignore_retcode=True,
python_shell=False,
)
def test_install_with_options():
parse_targets = MagicMock(return_value=({"foo": None}, "repository"))
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"pkg_resource.parse_targets": parse_targets}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.install(
refresh=False,
fromrepo="good",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"-y",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"install",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
ignore_retcode=False,
redirect_stderr=True,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.install(
refresh=False,
enablerepo="good",
disablerepo="bad",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"-y",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"install",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
ignore_retcode=False,
redirect_stderr=True,
)
def test_remove_with_epoch():
"""
    Tests that we properly identify a version containing an epoch for
    deinstallation.
    When no arch is given, packages can only be removed with the epoch
    stripped from the version:
.. code-block:: bash
yum remove PackageKit-yum-1.1.10-2.el7.centos
"""
name = "foo"
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [installed] if kwargs.get("versions_as_list", False) else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: installed}, "repository")
),
}
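    # installed[2:] strips the "8:" epoch prefix, leaving "<version>-<release>".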
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove(name)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_remove_with_epoch_and_arch_info():
"""
    Tests that we properly identify a version containing an epoch and arch
    for deinstallation.
    Packages can be removed with or without the epoch when the arch is
    included. Here we test with the epoch absent but the arch present:
.. code-block:: bash
yum remove PackageKit-yum-1.1.10-2.el7.centos.x86_64
"""
arch = "x86_64"
name = "foo"
name_and_arch = name + "." + arch
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name_and_arch: [installed]
if kwargs.get("versions_as_list", False)
else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name_and_arch: installed}, "repository")
),
}
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string + "." + arch]
yumpkg.remove(name)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_remove_with_wildcard():
"""
    Tests that wildcard package names are resolved to the matching installed
    package for deinstallation, with the epoch stripped as usual:
.. code-block:: bash
yum remove foo*
yum remove pkgs='[{"foo*": "8:3.8.12-4.n.el7"}]'
"""
name = "foobarpkg"
installed = "8:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [installed] if kwargs.get("versions_as_list", False) else installed
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: installed}, "repository")
),
}
full_pkg_string = "-".join((name, installed[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
with patch.dict(yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}):
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove("foo*")
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
expected = ["yum", "-y", "remove", full_pkg_string]
yumpkg.remove(pkgs=[{"foo*": "8:3.8.12-4.n.el7"}])
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
def test_install_with_epoch():
"""
Tests that we properly identify a version containing an epoch as an
upgrade instead of a downgrade.
"""
name = "foo"
old = "8:3.8.12-6.n.el7"
new = "9:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [old] if kwargs.get("versions_as_list", False) else old
}
)
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: new}, "repository")
),
}
full_pkg_string = "-".join((name, new[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
# Test yum
expected = ["yum", "-y", "install", full_pkg_string]
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
):
yumpkg.install("foo", version=new)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
# Test dnf
expected = [
"dnf",
"-y",
"--best",
"--allowerasing",
"install",
full_pkg_string,
]
yumpkg.__context__.pop("yum_bin")
cmd_mock.reset_mock()
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
):
yumpkg.install("foo", version=new)
call = cmd_mock.mock_calls[0][1][0]
assert call == expected, call
@pytest.mark.skipif(not salt.utils.platform.is_linux(), reason="Only run on Linux")
def test_install_error_reporting():
"""
Tests that we properly report yum/dnf errors.
"""
name = "foo"
old = "8:3.8.12-6.n.el7"
new = "9:3.8.12-4.n.el7"
list_pkgs_mock = MagicMock(
side_effect=lambda **kwargs: {
name: [old] if kwargs.get("versions_as_list", False) else old
}
)
salt_mock = {
"cmd.run_all": cmdmod.run_all,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: new}, "repository")
),
}
full_pkg_string = "-".join((name, new[2:]))
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock), patch.object(
yumpkg, "_yum", MagicMock(return_value="cat")
):
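        # Substituting "cat" for the yum binary makes the unmocked cmd.run_all
        # fail on the "-y" flag, producing a real error for install() to report.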
expected = {
"changes": {},
"errors": [
"cat: invalid option -- 'y'\nTry 'cat --help' for more information."
],
}
with pytest.raises(CommandExecutionError) as exc_info:
yumpkg.install("foo", version=new)
assert exc_info.value.info == expected, exc_info.value.info
def test_remove_not_installed():
"""
Tests that no exception raised on removing not installed package
"""
name = "foo"
list_pkgs_mock = MagicMock(return_value={})
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"pkg_resource.parse_targets": MagicMock(
return_value=({name: None}, "repository")
),
}
with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
), patch.dict(yumpkg.__salt__, salt_mock):
# Test yum
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
):
yumpkg.remove(name)
cmd_mock.assert_not_called()
# Test dnf
yumpkg.__context__.pop("yum_bin")
cmd_mock.reset_mock()
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
):
yumpkg.remove(name)
cmd_mock.assert_not_called()
def test_upgrade_with_options():
with patch.object(yumpkg, "list_pkgs", MagicMock(return_value={})), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
# with fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.upgrade(
refresh=False,
fromrepo="good",
exclude="kernel*",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"-y",
"--disablerepo=*",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"--exclude=kernel*",
"upgrade",
],
env={},
output_loglevel="trace",
python_shell=False,
)
# without fromrepo
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__salt__, {"cmd.run_all": cmd}):
yumpkg.upgrade(
refresh=False,
enablerepo="good",
disablerepo="bad",
exclude="kernel*",
branch="foo",
setopt="obsoletes=0,plugins=0",
)
cmd.assert_called_once_with(
[
"yum",
"--quiet",
"-y",
"--disablerepo=bad",
"--enablerepo=good",
"--branch=foo",
"--setopt",
"obsoletes=0",
"--setopt",
"plugins=0",
"--exclude=kernel*",
"upgrade",
],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_info_installed_with_all_versions():
"""
Test the return information of all versions for the named package(s), installed on the system.
:return:
"""
run_out = {
"virgo-dummy": [
{
"build_date": "2015-07-09T10:55:19Z",
"vendor": "openSUSE Build Service",
"description": (
"This is the Virgo dummy package used for testing SUSE Manager"
),
"license": "GPL-2.0",
"build_host": "sheep05",
"url": "http://www.suse.com",
"build_date_time_t": 1436432119,
"relocations": "(not relocatable)",
"source_rpm": "virgo-dummy-1.0-1.1.src.rpm",
"install_date": "2016-02-23T16:31:57Z",
"install_date_time_t": 1456241517,
"summary": "Virgo dummy package",
"version": "1.0",
"signature": (
"DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9"
),
"release": "1.1",
"group": "Applications/System",
"arch": "i686",
"size": "17992",
},
{
"build_date": "2015-07-09T10:15:19Z",
"vendor": "openSUSE Build Service",
"description": (
"This is the Virgo dummy package used for testing SUSE Manager"
),
"license": "GPL-2.0",
"build_host": "sheep05",
"url": "http://www.suse.com",
"build_date_time_t": 1436432119,
"relocations": "(not relocatable)",
"source_rpm": "virgo-dummy-1.0-1.1.src.rpm",
"install_date": "2016-02-23T16:31:57Z",
"install_date_time_t": 14562415127,
"summary": "Virgo dummy package",
"version": "1.0",
"signature": (
"DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9"
),
"release": "1.1",
"group": "Applications/System",
"arch": "x86_64",
"size": "13124",
},
],
"libopenssl1_0_0": [
{
"build_date": "2015-11-04T23:20:34Z",
"vendor": "SUSE LLC <https://www.suse.com/>",
"description": "The OpenSSL Project is a collaborative effort.",
"license": "OpenSSL",
"build_host": "sheep11",
"url": "https://www.openssl.org/",
"build_date_time_t": 1446675634,
"relocations": "(not relocatable)",
"source_rpm": "openssl-1.0.1i-34.1.src.rpm",
"install_date": "2016-02-23T16:31:35Z",
"install_date_time_t": 1456241495,
"summary": "Secure Sockets and Transport Layer Security",
"version": "1.0.1i",
"signature": (
"RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82"
),
"release": "34.1",
"group": "Productivity/Networking/Security",
"packager": "https://www.suse.com/",
"arch": "x86_64",
"size": "2576912",
}
],
}
with patch.dict(yumpkg.__salt__, {"lowpkg.info": MagicMock(return_value=run_out)}):
installed = yumpkg.info_installed(all_versions=True)
# Test overall products length
assert len(installed) == 2
# Test multiple versions for the same package
for pkg_name, pkg_info_list in installed.items():
            assert len(pkg_info_list) == (2 if pkg_name == "virgo-dummy" else 1)
for info in pkg_info_list:
assert info["arch"] in ("x86_64", "i686")
def test_pkg_hold_yum():
"""
Tests that we properly identify versionlock plugin when using yum
for RHEL/CentOS 7 and Fedora < 22
"""
# Test RHEL/CentOS 7
list_pkgs_mock = {
"yum-plugin-versionlock": "0:1.0.0-0.n.el7",
"yum-versionlock": "0:1.0.0-0.n.el7",
}
cmd = MagicMock(return_value={"retcode": 0})
with patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(yumpkg, "list_holds", MagicMock(return_value=[])), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["yum", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 20
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 20}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["yum", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_pkg_hold_tdnf():
"""
Tests that we raise a SaltInvocationError if we try to use
hold-related functions on Photon OS.
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "tdnf"}):
with pytest.raises(SaltInvocationError) as exc_info:
yumpkg.hold("foo")
def test_pkg_hold_dnf():
"""
Tests that we properly identify versionlock plugin when using dnf
for RHEL/CentOS 8 and Fedora >= 22
"""
# Test RHEL/CentOS 8
list_pkgs_mock = {
"python2-dnf-plugin-versionlock": "0:1.0.0-0.n.el8",
"python3-dnf-plugin-versionlock": "0:1.0.0-0.n.el8",
}
yumpkg.__context__.pop("yum_bin")
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"osmajorrelease": 8}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 26+
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 26}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
# Test Fedora 22-25
list_pkgs_mock = {
"python-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8",
"python3-dnf-plugins-extras-versionlock": "0:1.0.0-0.n.el8",
}
cmd = MagicMock(return_value={"retcode": 0})
with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
yumpkg.__grains__, {"os": "Fedora", "osrelease": 25}
), patch.object(
yumpkg, "list_pkgs", MagicMock(return_value=list_pkgs_mock)
), patch.object(
yumpkg, "list_holds", MagicMock(return_value=[])
), patch.dict(
yumpkg.__salt__, {"cmd.run_all": cmd}
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
yumpkg.hold("foo")
cmd.assert_called_once_with(
["dnf", "versionlock", "foo"],
env={},
output_loglevel="trace",
python_shell=False,
)
@pytest.mark.skipif(not yumpkg.HAS_YUM, reason="Could not import yum")
def test_yum_base_error():
with patch("yum.YumBase") as mock_yum_yumbase:
mock_yum_yumbase.side_effect = CommandExecutionError
with pytest.raises(CommandExecutionError):
yumpkg._get_yum_config()
def test_group_info():
"""
Test yumpkg.group_info parsing
"""
expected = {
"conditional": [],
"default": ["qgnomeplatform", "xdg-desktop-portal-gtk"],
"description": (
"GNOME is a highly intuitive and user friendly desktop environment."
),
"group": "GNOME",
"id": "gnome-desktop",
"mandatory": [
"NetworkManager-libreswan-gnome",
"PackageKit-command-not-found",
"PackageKit-gtk3-module",
"abrt-desktop",
"at-spi2-atk",
"at-spi2-core",
"avahi",
"baobab",
"caribou",
"caribou-gtk2-module",
"caribou-gtk3-module",
"cheese",
"chrome-gnome-shell",
"compat-cheese314",
"control-center",
"dconf",
"empathy",
"eog",
"evince",
"evince-nautilus",
"file-roller",
"file-roller-nautilus",
"firewall-config",
"firstboot",
"fprintd-pam",
"gdm",
"gedit",
"glib-networking",
"gnome-bluetooth",
"gnome-boxes",
"gnome-calculator",
"gnome-classic-session",
"gnome-clocks",
"gnome-color-manager",
"gnome-contacts",
"gnome-dictionary",
"gnome-disk-utility",
"gnome-font-viewer",
"gnome-getting-started-docs",
"gnome-icon-theme",
"gnome-icon-theme-extras",
"gnome-icon-theme-symbolic",
"gnome-initial-setup",
"gnome-packagekit",
"gnome-packagekit-updater",
"gnome-screenshot",
"gnome-session",
"gnome-session-xsession",
"gnome-settings-daemon",
"gnome-shell",
"gnome-software",
"gnome-system-log",
"gnome-system-monitor",
"gnome-terminal",
"gnome-terminal-nautilus",
"gnome-themes-standard",
"gnome-tweak-tool",
"gnome-user-docs",
"gnome-weather",
"gucharmap",
"gvfs-afc",
"gvfs-afp",
"gvfs-archive",
"gvfs-fuse",
"gvfs-goa",
"gvfs-gphoto2",
"gvfs-mtp",
"gvfs-smb",
"initial-setup-gui",
"libcanberra-gtk2",
"libcanberra-gtk3",
"libproxy-mozjs",
"librsvg2",
"libsane-hpaio",
"metacity",
"mousetweaks",
"nautilus",
"nautilus-sendto",
"nm-connection-editor",
"orca",
"redhat-access-gui",
"sane-backends-drivers-scanners",
"seahorse",
"setroubleshoot",
"sushi",
"totem",
"totem-nautilus",
"vinagre",
"vino",
"xdg-user-dirs-gtk",
"yelp",
],
"optional": [
"",
"alacarte",
"dconf-editor",
"dvgrab",
"fonts-tweak-tool",
"gconf-editor",
"gedit-plugins",
"gnote",
"libappindicator-gtk3",
"seahorse-nautilus",
"seahorse-sharing",
"vim-X11",
"xguest",
],
"type": "package group",
}
cmd_out = """Group: GNOME
Group-Id: gnome-desktop
Description: GNOME is a highly intuitive and user friendly desktop environment.
Mandatory Packages:
=NetworkManager-libreswan-gnome
=PackageKit-command-not-found
=PackageKit-gtk3-module
abrt-desktop
=at-spi2-atk
=at-spi2-core
=avahi
=baobab
-caribou
-caribou-gtk2-module
-caribou-gtk3-module
=cheese
=chrome-gnome-shell
=compat-cheese314
=control-center
=dconf
=empathy
=eog
=evince
=evince-nautilus
=file-roller
=file-roller-nautilus
=firewall-config
=firstboot
fprintd-pam
=gdm
=gedit
=glib-networking
=gnome-bluetooth
=gnome-boxes
=gnome-calculator
=gnome-classic-session
=gnome-clocks
=gnome-color-manager
=gnome-contacts
=gnome-dictionary
=gnome-disk-utility
=gnome-font-viewer
=gnome-getting-started-docs
=gnome-icon-theme
=gnome-icon-theme-extras
=gnome-icon-theme-symbolic
=gnome-initial-setup
=gnome-packagekit
=gnome-packagekit-updater
=gnome-screenshot
=gnome-session
=gnome-session-xsession
=gnome-settings-daemon
=gnome-shell
=gnome-software
=gnome-system-log
=gnome-system-monitor
=gnome-terminal
=gnome-terminal-nautilus
=gnome-themes-standard
=gnome-tweak-tool
=gnome-user-docs
=gnome-weather
=gucharmap
=gvfs-afc
=gvfs-afp
=gvfs-archive
=gvfs-fuse
=gvfs-goa
=gvfs-gphoto2
=gvfs-mtp
=gvfs-smb
initial-setup-gui
=libcanberra-gtk2
=libcanberra-gtk3
=libproxy-mozjs
=librsvg2
=libsane-hpaio
=metacity
=mousetweaks
=nautilus
=nautilus-sendto
=nm-connection-editor
=orca
-redhat-access-gui
=sane-backends-drivers-scanners
=seahorse
=setroubleshoot
=sushi
=totem
=totem-nautilus
=vinagre
=vino
=xdg-user-dirs-gtk
=yelp
Default Packages:
=qgnomeplatform
=xdg-desktop-portal-gtk
Optional Packages:
alacarte
dconf-editor
dvgrab
fonts-tweak-tool
gconf-editor
gedit-plugins
gnote
libappindicator-gtk3
seahorse-nautilus
seahorse-sharing
vim-X11
xguest
"""
with patch.dict(
yumpkg.__salt__, {"cmd.run_stdout": MagicMock(return_value=cmd_out)}
):
info = yumpkg.group_info("@gnome-desktop")
assert info == expected
def test_get_repo_with_existent_repo(list_repos_var):
"""
Test get_repo with an existent repository
Expected return is a populated dictionary
"""
repo = "base-source"
kwargs = {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": True,
}
parse_repo_file_return = (
"",
{
"base-source": {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": "1",
}
},
)
expected = {
"baseurl": "http://vault.centos.org/centos/$releasever/os/Source/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Base Sources",
"enabled": "1",
}
patch_list_repos = patch.object(
yumpkg, "list_repos", autospec=True, return_value=list_repos_var
)
patch_parse_repo_file = patch.object(
yumpkg,
"_parse_repo_file",
autospec=True,
return_value=parse_repo_file_return,
)
with patch_list_repos, patch_parse_repo_file:
ret = yumpkg.get_repo(repo, **kwargs)
assert ret == expected, ret
def test_get_repo_with_non_existent_repo(list_repos_var):
"""
Test get_repo with an non existent repository
Expected return is an empty dictionary
"""
repo = "non-existent-repository"
kwargs = {
"baseurl": "http://fake.centos.org/centos/$releasever/os/Non-Existent/",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7",
"name": "CentOS-$releasever - Non-Existent Repository",
"enabled": True,
}
expected = {}
patch_list_repos = patch.object(
yumpkg, "list_repos", autospec=True, return_value=list_repos_var
)
with patch_list_repos:
ret = yumpkg.get_repo(repo, **kwargs)
assert ret == expected, ret
def test_pkg_update_dnf():
"""
Tests that the proper CLI options are added when obsoletes=False
"""
name = "foo"
old = "1.2.2-1.fc31"
new = "1.2.3-1.fc31"
cmd_mock = MagicMock(return_value={"retcode": 0})
list_pkgs_mock = MagicMock(side_effect=[{name: old}, {name: new}])
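    # The first call returns the pre-update state and the second the
    # post-update state, so update() reports the diff.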
parse_targets_mock = MagicMock(return_value=({"foo": None}, "repository"))
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": cmd_mock, "pkg_resource.parse_targets": parse_targets_mock},
), patch.object(yumpkg, "refresh_db", MagicMock()), patch.object(
yumpkg, "list_pkgs", list_pkgs_mock
), patch.object(
yumpkg, "_yum", MagicMock(return_value="dnf")
), patch(
"salt.utils.systemd.has_scope", MagicMock(return_value=False)
):
ret = yumpkg.update(name, setopt="obsoletes=0,plugins=0")
expected = {name: {"old": old, "new": new}}
assert ret == expected, ret
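        # update() defaults to obsoletes=False, which the dnf code path expresses
        # as "--setopt obsoletes=False", superseding the user-supplied "obsoletes=0".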
cmd_mock.assert_called_once_with(
[
"dnf",
"--quiet",
"-y",
"--setopt",
"plugins=0",
"--setopt",
"obsoletes=False",
"upgrade",
"foo",
],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_call_yum_default():
"""
Call default Yum/Dnf.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
):
yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["fake-yum", "-y", "--do-something"],
env={},
output_loglevel="trace",
python_shell=False,
)
@patch("salt.utils.systemd.has_scope", MagicMock(return_value=True))
def test_call_yum_in_scope():
"""
    Call Yum/Dnf within a systemd scope.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=True)},
):
yumpkg._call_yum(["-y", "--do-something"]) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["systemd-run", "--scope", "fake-yum", "-y", "--do-something"],
env={},
output_loglevel="trace",
python_shell=False,
)
def test_call_yum_with_kwargs():
"""
    Call Yum/Dnf with the optional keyword arguments.
:return:
"""
with patch.dict(yumpkg.__context__, {"yum_bin": "fake-yum"}):
with patch.dict(
yumpkg.__salt__,
{"cmd.run_all": MagicMock(), "config.get": MagicMock(return_value=False)},
):
yumpkg._call_yum(
["-y", "--do-something"],
python_shell=True,
output_loglevel="quiet",
ignore_retcode=False,
username="Darth Vader",
) # pylint: disable=W0106
yumpkg.__salt__["cmd.run_all"].assert_called_once_with(
["fake-yum", "-y", "--do-something"],
env={},
ignore_retcode=False,
output_loglevel="quiet",
python_shell=True,
username="Darth Vader",
)
@pytest.mark.skipif(not salt.utils.systemd.booted(), reason="Requires systemd")
def test_services_need_restart():
"""
Test that dnf needs-restarting output is parsed and
salt.utils.systemd.pid_to_service is called as expected.
"""
expected = ["firewalld", "salt-minion"]
dnf_mock = Mock(
return_value="123 : /usr/bin/firewalld\n456 : /usr/bin/salt-minion\n"
)
systemd_mock = Mock(side_effect=["firewalld", "salt-minion"])
with patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")):
with patch.dict(yumpkg.__salt__, {"cmd.run_stdout": dnf_mock}), patch(
"salt.utils.systemd.pid_to_service", systemd_mock
):
assert sorted(yumpkg.services_need_restart()) == expected
systemd_mock.assert_has_calls([call("123"), call("456")])
def test_services_need_restart_requires_systemd():
"""Test that yumpkg.services_need_restart raises an error if systemd is unavailable."""
with patch("salt.modules.yumpkg._yum", Mock(return_value="dnf")):
with patch("salt.utils.systemd.booted", Mock(return_value=False)):
pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
def test_services_need_restart_requires_dnf():
"""Test that yumpkg.services_need_restart raises an error if DNF is unavailable."""
with patch("salt.modules.yumpkg._yum", Mock(return_value="yum")):
pytest.raises(CommandExecutionError, yumpkg.services_need_restart)
def test_61003_pkg_should_not_fail_when_target_not_in_old_pkgs():
patch_list_pkgs = patch(
"salt.modules.yumpkg.list_pkgs", return_value={}, autospec=True
)
patch_salt = patch.dict(
yumpkg.__salt__,
{
"pkg_resource.parse_targets": Mock(
return_value=[
{
"fnord-this-is-not-actually-a-package": "fnord-this-is-not-actually-a-package-1.2.3"
}
]
)
},
)
with patch_list_pkgs, patch_salt:
        # During the 3004rc1 cycle we discovered that if list_pkgs was missing
        # packages that were returned by parse_targets, yumpkg.remove would
        # catch on fire. This ensures that won't go undetected again.
yumpkg.remove()
| saltstack/salt | tests/pytests/unit/modules/test_yumpkg.py | Python | apache-2.0 | 70,522 | ["ORCA"] | a69ce44b05749cbcdf7d36db11a8e95bcad705ad5a009664416731b23d2c1a1d |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2008 Robert Cheramy <robert@cheramy.net>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to GEDCOM"
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import time
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date,
EventRoleType, EventType, LdsOrd, NameType,
PlaceType, NoteType, Person, UrlType)
from gramps.version import VERSION
import gramps.plugins.lib.libgedcom as libgedcom
from gramps.gen.errors import DatabaseError
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.utils.location import get_main_location
from gramps.gen.display.place import displayer as _pd
#-------------------------------------------------------------------------
#
# GEDCOM tags representing attributes that may take a parameter, value or
# description on the same line as the tag
#
#-------------------------------------------------------------------------
NEEDS_PARAMETER = set(
["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI",
"NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"])
LDS_ORD_NAME = {
LdsOrd.BAPTISM : 'BAPL',
LdsOrd.ENDOWMENT : 'ENDL',
LdsOrd.SEAL_TO_PARENTS : 'SLGC',
LdsOrd.SEAL_TO_SPOUSE : 'SLGS',
LdsOrd.CONFIRMATION : 'CONL',
}
LDS_STATUS = {
LdsOrd.STATUS_BIC : "BIC",
LdsOrd.STATUS_CANCELED : "CANCELED",
LdsOrd.STATUS_CHILD : "CHILD",
LdsOrd.STATUS_CLEARED : "CLEARED",
LdsOrd.STATUS_COMPLETED : "COMPLETED",
LdsOrd.STATUS_DNS : "DNS",
LdsOrd.STATUS_INFANT : "INFANT",
LdsOrd.STATUS_PRE_1970 : "PRE-1970",
LdsOrd.STATUS_QUALIFIED : "QUALIFIED",
LdsOrd.STATUS_DNS_CAN : "DNS/CAN",
LdsOrd.STATUS_STILLBORN : "STILLBORN",
LdsOrd.STATUS_SUBMITTED : "SUBMITTED",
LdsOrd.STATUS_UNCLEARED : "UNCLEARED",
}
LANGUAGES = {
    'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch', 'en' : 'English',
    'eo' : 'Esperanto', 'fi' : 'Finnish', 'fr' : 'French', 'de' : 'German',
    'hu' : 'Hungarian', 'it' : 'Italian', 'lt' : 'Lithuanian',
    'lv' : 'Latvian', 'no' : 'Norwegian', 'pl' : 'Polish',
    'pt' : 'Portuguese', 'ro' : 'Romanian', 'sk' : 'Slovak',
    'es' : 'Spanish', 'sv' : 'Swedish', 'ru' : 'Russian', }
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
MIME2GED = {
"image/bmp" : "bmp",
"image/gif" : "gif",
"image/jpeg" : "jpeg",
"image/x-pcx" : "pcx",
"image/tiff" : "tiff",
"audio/x-wav" : "wav"
}
QUALITY_MAP = {
Citation.CONF_VERY_HIGH : "3",
Citation.CONF_HIGH : "2",
Citation.CONF_LOW : "1",
Citation.CONF_VERY_LOW : "0",
}
PEDIGREE_TYPES = {
ChildRefType.BIRTH : 'birth',
ChildRefType.STEPCHILD: 'Step',
ChildRefType.ADOPTED : 'Adopted',
ChildRefType.FOSTER : 'Foster',
}
NOTES_PER_PERSON = 104 # fudge factor to make progress meter a bit smoother
#-------------------------------------------------------------------------
#
# sort_handles_by_id
#
#-------------------------------------------------------------------------
def sort_handles_by_id(handle_list, handle_to_object):
"""
Sort a list of handles by the Gramps ID.
The function that returns the object from the handle needs to be supplied
so that we get the right object.
"""
sorted_list = []
for handle in handle_list:
obj = handle_to_object(handle)
if obj:
data = (obj.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
return sorted_list
#-------------------------------------------------------------------------
#
# breakup
#
#-------------------------------------------------------------------------
def breakup(txt, limit):
"""
    Break a line of text into a list of strings that conform to the
    maximum length specified. Breaks are made between pairs of non-space
    characters, in the middle of a word if necessary, so that continuation
    lines never begin or end with a space.
"""
if limit < 1:
raise ValueError("breakup: unexpected limit: %r" % limit)
data = []
while len(txt) > limit:
# look for non-space pair to break between
# do not break within a UTF-8 byte sequence, i. e. first char >127
idx = limit
while (idx > 0 and (txt[idx - 1].isspace() or txt[idx].isspace() or
ord(txt[idx - 1]) > 127)):
idx -= 1
if idx == 0:
#no words to break on, just break at limit anyway
idx = limit
data.append(txt[:idx])
txt = txt[idx:]
if len(txt) > 0:
data.append(txt)
return data
#-------------------------------------------------------------------------
#
# event_has_subordinate_data
# may want to compare description w/ auto-generated one, and
# if so, treat it same as if it were empty for this purpose
#
#-------------------------------------------------------------------------
def event_has_subordinate_data(event, event_ref):
""" determine if event is empty or not """
if event and event_ref:
return (event.get_description().strip() or
not event.get_date_object().is_empty() or
event.get_place_handle() or
event.get_attribute_list() or
event_ref.get_attribute_list() or
event.get_note_list() or
event.get_citation_list() or
event.get_media_list())
else:
return False
#-------------------------------------------------------------------------
#
# GedcomWriter class
#
#-------------------------------------------------------------------------
class GedcomWriter(UpdateCallback):
"""
The GEDCOM writer creates a GEDCOM file that contains the exported
information from the database. It derives from UpdateCallback
so that it can provide visual feedback via a progress bar if needed.
"""
def __init__(self, database, user, option_box=None):
UpdateCallback.__init__(self, user.callback)
self.dbase = database
self.dirname = None
self.gedcom_file = None
self.progress_cnt = 0
self.setup(option_box)
def setup(self, option_box):
"""
If the option_box is present (GUI interface), then we check the
"private", "restrict", and "cfilter" arguments to see if we need
to apply proxy databases.
"""
if option_box:
option_box.parse_options()
self.dbase = option_box.get_filtered_database(self.dbase, self)
def write_gedcom_file(self, filename):
"""
Write the actual GEDCOM file to the specified filename.
"""
self.dirname = os.path.dirname(filename)
with open(filename, "w", encoding='utf-8') as self.gedcom_file:
person_len = self.dbase.get_number_of_people()
family_len = self.dbase.get_number_of_families()
source_len = self.dbase.get_number_of_sources()
repo_len = self.dbase.get_number_of_repositories()
note_len = self.dbase.get_number_of_notes() / NOTES_PER_PERSON
total_steps = (person_len + family_len + source_len + repo_len +
note_len)
self.set_total(total_steps)
self._header(filename)
self._submitter()
self._individuals()
self._families()
self._sources()
self._repos()
self._notes()
self._writeln(0, "TRLR")
return True
def _writeln(self, level, token, textlines="", limit=72):
"""
Write a line of text to the output file in the form of:
LEVEL TOKEN text
If the line contains newlines, it is broken into multiple lines using
the CONT token. If any line is longer than the limit, it will be broken
into multiple lines using CONC.
"""
assert token
if textlines:
# break the line into multiple lines if a newline is found
textlines = textlines.replace('\n\r', '\n')
textlines = textlines.replace('\r', '\n')
# Need to double '@' See Gedcom 5.5 spec 'any_char'
if not textlines.startswith('@'): # avoid xrefs
textlines = textlines.replace('@', '@@')
textlist = textlines.split('\n')
token_level = level
for text in textlist:
# coerce to str so that breakup below does the right thing.
text = str(text)
if limit:
prefix = "\n%d CONC " % (level + 1)
txt = prefix.join(breakup(text, limit))
else:
txt = text
self.gedcom_file.write("%d %s %s\n" %
(token_level, token, txt))
token_level = level + 1
token = "CONT"
else:
self.gedcom_file.write("%d %s\n" % (level, token))
def _header(self, filename):
"""
Write the GEDCOM header.
HEADER:=
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1} # Not used
+3 <<ADDRESS_STRUCTURE>> {0:1} # Not used
+2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used
+3 DATE <PUBLICATION_DATE> {0:1} # Not used
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @XREF:SUBM@ {1:1}
+1 SUBN @XREF:SUBN@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
"""
local_time = time.localtime(time.time())
(year, mon, day, hour, minutes, sec) = local_time[0:6]
date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year)
time_str = "%02d:%02d:%02d" % (hour, minutes, sec)
rname = self.dbase.get_researcher().get_name()
self._writeln(0, "HEAD")
self._writeln(1, "SOUR", "Gramps")
self._writeln(2, "VERS", VERSION)
self._writeln(2, "NAME", "Gramps")
self._writeln(1, "DATE", date_str)
self._writeln(2, "TIME", time_str)
self._writeln(1, "SUBM", "@SUBM@")
self._writeln(1, "FILE", filename, limit=255)
self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname))
self._writeln(1, "GEDC")
self._writeln(2, "VERS", "5.5.1")
self._writeln(2, "FORM", 'LINEAGE-LINKED')
self._writeln(1, "CHAR", "UTF-8")
# write the language string if the current LANG variable
# matches something we know about.
lang = glocale.language[0]
if lang and len(lang) >= 2:
lang_code = LANGUAGES.get(lang[0:2])
if lang_code:
self._writeln(1, 'LANG', lang_code)
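# Sample of the emitted header (comment only; values illustrative):
#   0 HEAD
#   1 SOUR Gramps
#   2 VERS <Gramps version>
#   2 NAME Gramps
#   1 DATE 1 JAN 2020
#   2 TIME 12:00:00
#   1 SUBM @SUBM@
#   1 FILE <output filename>
#   1 COPR Copyright (c) 2020 <researcher>.
#   1 GEDC
#   2 VERS 5.5.1
#   2 FORM LINEAGE-LINKED
#   1 CHAR UTF-8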
def _submitter(self):
"""
n @<XREF:SUBM>@ SUBM {1:1}
+1 NAME <SUBMITTER_NAME> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} # not used
+1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used
+1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used
+1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used
+1 <<CHANGE_DATE>> {0:1} # not used
"""
owner = self.dbase.get_researcher()
name = owner.get_name()
phon = owner.get_phone()
mail = owner.get_email()
self._writeln(0, "@SUBM@", "SUBM")
self._writeln(1, "NAME", name)
# Researcher is a sub-type of LocationBase, so get_city etc., which are
# used in __write_addr, work fine. However, the database owner's street
# is stored in the address field, so we temporarily copy it into street
# so that __write_addr works properly.
owner.set_street(owner.get_address())
self.__write_addr(1, owner)
if phon:
self._writeln(1, "PHON", phon)
if mail:
self._writeln(1, "EMAIL", mail)
def _individuals(self):
"""
Write the individual people to the gedcom file.
Since people like to have the list sorted by ID value, we need to go
through a sorting step. We need to reset the progress bar; otherwise
people will be confused when the progress bar is idle.
"""
self.set_text(_("Writing individuals"))
phandles = self.dbase.iter_person_handles()
sorted_list = []
for handle in phandles:
person = self.dbase.get_person_from_handle(handle)
if person:
data = (person.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
for data in sorted_list:
self.update()
self._person(self.dbase.get_person_from_handle(data[1]))
def _person(self, person):
"""
Write out a single person.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1} # not used
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if person is None:
return
self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI")
self._names(person)
self._gender(person)
self._person_event_ref('BIRT', person.get_birth_ref())
self._person_event_ref('DEAT', person.get_death_ref())
self._remaining_events(person)
self._attributes(person)
self._lds_ords(person, 1)
self._child_families(person)
self._parent_families(person)
self._assoc(person, 1)
self._person_sources(person)
self._addresses(person)
self._photos(person.get_media_list(), 1)
self._url_list(person, 1)
self._note_references(person.get_note_list(), 1)
self._change(person.get_change_time(), 1)
def _assoc(self, person, level):
"""
n ASSO @<XREF:INDI>@ {0:M}
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
"""
for ref in person.get_person_ref_list():
person = self.dbase.get_person_from_handle(ref.ref)
if person:
self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id())
self._writeln(level + 1, "RELA", ref.get_relation())
self._note_references(ref.get_note_list(), level + 1)
self._source_references(ref.get_citation_list(), level + 1)
def _note_references(self, notelist, level):
"""
Write out the list of note handles to the current level.
We use the Gramps ID as the XREF for the GEDCOM file.
"""
for note_handle in notelist:
note = self.dbase.get_note_from_handle(note_handle)
if note:
self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id())
def _names(self, person):
"""
Write the names associated with the person to the current level.
Since nicknames in version < 3.3 are separate from the name structure,
we search the attribute list to see if we can find a nickname.
Because we do not know the mappings, we just take the first nickname
we find, and add it to the primary name.
If a nickname is present in the name structure, it takes precedence.
"""
nicknames = [attr.get_value() for attr in person.get_attribute_list()
if int(attr.get_type()) == AttributeType.NICKNAME]
if len(nicknames) > 0:
nickname = nicknames[0]
else:
nickname = ""
self._person_name(person.get_primary_name(), nickname)
for name in person.get_alternate_names():
self._person_name(name, "")
def _gender(self, person):
"""
Write out the gender of the person to the file.
If the gender is not male or female, nothing is output: the only valid
GEDCOM values are M (male) and F (female).
"""
if person.get_gender() == Person.MALE:
self._writeln(1, "SEX", "M")
elif person.get_gender() == Person.FEMALE:
self._writeln(1, "SEX", "F")
def _lds_ords(self, obj, level):
"""
Simply loop through the list of LDS ordinances, and call the function
that writes the LDS ordinance structure.
"""
for lds_ord in obj.get_lds_ord_list():
self.write_ord(lds_ord, level)
def _remaining_events(self, person):
"""
Output all events associated with the person that are not BIRTH or
DEATH events.
Because all we have are event references, we have to
extract the real event to discover the event type.
"""
global adop_written
# adop_written is only shared between this function and
# _process_person_event. This is rather ugly code, but it is difficult
# to support an Adoption event without an Adopted relationship from the
# parent(s), an Adopted relationship from the parent(s) without an
# event, and both an event and a relationship. All these need to be
# supported without duplicating the output of the ADOP GEDCOM tag. See
# bug report 2370.
adop_written = False
for event_ref in person.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if not event:
continue
self._process_person_event(person, event, event_ref)
if not adop_written:
self._adoption_records(person, adop_written)
def _process_person_event(self, person, event, event_ref):
"""
Process a person event, which is not a BIRTH or DEATH event.
"""
global adop_written
etype = int(event.get_type())
# if the event is a birth or death, skip it.
if etype in (EventType.BIRTH, EventType.DEATH):
return
role = int(event_ref.get_role())
# if the event role is not primary, skip the event.
if role != EventRoleType.PRIMARY:
return
val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip()
if val and val.strip():
if val in NEEDS_PARAMETER:
if event.get_description().strip():
self._writeln(1, val, event.get_description())
else:
self._writeln(1, val)
else:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_description().strip():
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
if val.strip():
self._writeln(2, 'TYPE', val)
else:
self._writeln(2, 'TYPE', str(event.get_type()))
self._dump_event_stats(event, event_ref)
if etype == EventType.ADOPT and not adop_written:
adop_written = True
self._adoption_records(person, adop_written)
def _adoption_records(self, person, adop_written):
"""
Write Adoption events for each child that has been adopted.
n ADOP
+1 <<INDIVIDUAL_EVENT_DETAIL>>
+1 FAMC @<XREF:FAM>@
+2 ADOP <ADOPTED_BY_WHICH_PARENT>
"""
adoptions = []
for family in [self.dbase.get_family_from_handle(fh)
for fh in person.get_parent_family_handle_list()]:
if family is None:
continue
for child_ref in [ref for ref in family.get_child_ref_list()
if ref.ref == person.handle]:
if child_ref.mrel == ChildRefType.ADOPTED \
or child_ref.frel == ChildRefType.ADOPTED:
adoptions.append((family, child_ref.frel, child_ref.mrel))
for (fam, frel, mrel) in adoptions:
if not adop_written:
self._writeln(1, 'ADOP', 'Y')
self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id())
if mrel == frel:
self._writeln(3, 'ADOP', 'BOTH')
elif mrel == ChildRefType.ADOPTED:
self._writeln(3, 'ADOP', 'WIFE')
else:
self._writeln(3, 'ADOP', 'HUSB')
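# Illustrative output for a child adopted by both parents (comment only;
# the family ID is hypothetical):
#   1 ADOP Y
#   2 FAMC @F0001@
#   3 ADOP BOTH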
def _attributes(self, person):
"""
Write out the attributes to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
# filter out the nicknames
attr_list = [attr for attr in person.get_attribute_list()
if attr.get_type() != AttributeType.NICKNAME]
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().strip().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID", "_FSFTID"):
self._writeln(1, key, value)
continue
if key == "RESN":
self._writeln(1, 'RESN')
continue
if name and name.strip():
self._writeln(1, name, value)
elif value:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
else:
continue
self._note_references(attr.get_note_list(), 2)
self._source_references(attr.get_citation_list(), 2)
def _source_references(self, citation_list, level):
"""
Loop through the list of citation handles, writing the information
to the file.
"""
for citation_handle in citation_list:
self._source_ref_record(level, citation_handle)
def _addresses(self, person):
"""
Write out the addresses associated with the person as RESI events.
"""
for addr in person.get_address_list():
self._writeln(1, 'RESI')
self._date(2, addr.get_date_object())
self.__write_addr(2, addr)
if addr.get_phone():
self._writeln(2, 'PHON', addr.get_phone())
self._note_references(addr.get_note_list(), 2)
self._source_references(addr.get_citation_list(), 2)
def _photos(self, media_list, level):
"""
Loop through the list of media objects, writing the information
to the file.
"""
for photo in media_list:
self._photo(photo, level)
def _child_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a child.
"""
# get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_parent_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id())
for child in family.get_child_ref_list():
if child.get_reference_handle() == person.get_handle():
if child.frel == ChildRefType.ADOPTED and \
child.mrel == ChildRefType.ADOPTED:
self._writeln(2, 'PEDI', 'adopted')
elif child.frel == ChildRefType.BIRTH and \
child.mrel == ChildRefType.BIRTH:
self._writeln(2, 'PEDI', 'birth')
elif child.frel == ChildRefType.STEPCHILD and \
child.mrel == ChildRefType.STEPCHILD:
self._writeln(2, 'PEDI', 'stepchild')
elif child.frel == ChildRefType.FOSTER and \
child.mrel == ChildRefType.FOSTER:
self._writeln(2, 'PEDI', 'foster')
elif child.frel == child.mrel:
self._writeln(2, 'PEDI', 'Unknown')
else:
self._writeln(2, '_FREL',
PEDIGREE_TYPES.get(child.frel.value, "Unknown"))
self._writeln(2, '_MREL',
PEDIGREE_TYPES.get(child.mrel.value, "Unknown"))
def _parent_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a parent.
"""
# get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id())
def _person_sources(self, person):
"""
Loop through the list of citations, writing the information
to the file.
"""
for citation_handle in person.get_citation_list():
self._source_ref_record(1, citation_handle)
def _url_list(self, obj, level):
"""
For Person's FAX, PHON, EMAIL, WWW lines;
n PHON <PHONE_NUMBER> {0:3}
n EMAIL <ADDRESS_EMAIL> {0:3}
n FAX <ADDRESS_FAX> {0:3}
n WWW <ADDRESS_WEB_PAGE> {0:3}
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
for url in obj.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(level, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(level, 'WWW', url.get_path())
elif url.get_type() == _('Phone'):
self._writeln(level, 'PHON', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(level, 'FAX', url.get_path())
else:
self._writeln(level, 'OBJE')
self._writeln(level + 1, 'FORM', 'URL')
if url.get_description():
self._writeln(level + 1, 'TITL', url.get_description())
if url.get_path():
self._writeln(level + 1, 'FILE', url.get_path(), limit=255)
def _families(self):
"""
Write out the list of families, sorting by Gramps ID.
"""
self.set_text(_("Writing families"))
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_family_handles(),
self.dbase.get_family_from_handle)
# loop through the sorted list, pulling out the handle. This list
# has already been sorted by GRAMPS_ID
for family_handle in [hndl[1] for hndl in sorted_list]:
self.update()
self._family(self.dbase.get_family_from_handle(family_handle))
def _family(self, family):
"""
n @<XREF:FAM>@ FAM {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
"""
if family is None:
return
gramps_id = family.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'FAM')
self._family_reference('HUSB', family.get_father_handle())
self._family_reference('WIFE', family.get_mother_handle())
self._lds_ords(family, 1)
self._family_events(family)
self._family_attributes(family.get_attribute_list(), 1)
self._family_child_list(family.get_child_ref_list())
self._source_references(family.get_citation_list(), 1)
self._photos(family.get_media_list(), 1)
self._note_references(family.get_note_list(), 1)
self._change(family.get_change_time(), 1)
def _family_child_list(self, child_ref_list):
"""
Write the child XREF values to the GEDCOM file.
"""
child_list = [
self.dbase.get_person_from_handle(cref.ref).get_gramps_id()
for cref in child_ref_list]
for gid in child_list:
if gid is None:
continue
self._writeln(1, 'CHIL', '@%s@' % gid)
def _family_reference(self, token, person_handle):
"""
Write the family reference to the file.
This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the
XREF value.
"""
if person_handle:
person = self.dbase.get_person_from_handle(person_handle)
if person:
self._writeln(1, token, '@%s@' % person.get_gramps_id())
def _family_events(self, family):
"""
Output the events associated with the family.
Because all we have are event references, we have to extract the real
event to discover the event type.
"""
for event_ref in family.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None:
continue
self._process_family_event(event, event_ref)
self._dump_event_stats(event, event_ref)
def _process_family_event(self, event, event_ref):
"""
Process a single family event.
"""
etype = int(event.get_type())
val = libgedcom.FAMILYCONSTANTEVENTS.get(etype)
if val:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_type() == EventType.MARRIAGE:
self._family_event_attrs(event.get_attribute_list(), 2)
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
the_type = str(event.get_type())
if the_type:
self._writeln(2, 'TYPE', the_type)
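# Illustrative behaviour (comment only): an empty marriage event yields
# "1 MARR Y", while one carrying subordinate data (date, place, notes,
# ...) yields "1 MARR" and lets _dump_event_stats write the detail
# lines, e.g. "2 DATE 1 JAN 1900".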
def _family_event_attrs(self, attr_list, level):
"""
Write the attributes associated with the family event.
The only ones we really care about are FATHER_AGE and MOTHER_AGE which
we translate to WIFE/HUSB AGE attributes.
"""
for attr in attr_list:
if attr.get_type() == AttributeType.FATHER_AGE:
self._writeln(level, 'HUSB')
self._writeln(level + 1, 'AGE', attr.get_value())
elif attr.get_type() == AttributeType.MOTHER_AGE:
self._writeln(level, 'WIFE')
self._writeln(level + 1, 'AGE', attr.get_value())
def _family_attributes(self, attr_list, level):
"""
Write out the attributes associated with a family to the GEDCOM file.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID"):
self._writeln(1, key, value)
continue
if name and name.strip():
self._writeln(1, name, value)
continue
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
self._note_references(attr.get_note_list(), level + 1)
self._source_references(attr.get_citation_list(),
level + 1)
def _sources(self):
"""
Write out the list of sources, sorting by Gramps ID.
"""
self.set_text(_("Writing sources"))
sorted_list = sort_handles_by_id(self.dbase.get_source_handles(),
self.dbase.get_source_from_handle)
for (source_id, handle) in sorted_list:
self.update()
source = self.dbase.get_source_from_handle(handle)
if source is None:
continue
self._writeln(0, '@%s@' % source_id, 'SOUR')
if source.get_title():
self._writeln(1, 'TITL', source.get_title())
if source.get_author():
self._writeln(1, "AUTH", source.get_author())
if source.get_publication_info():
self._writeln(1, "PUBL", source.get_publication_info())
if source.get_abbreviation():
self._writeln(1, 'ABBR', source.get_abbreviation())
self._photos(source.get_media_list(), 1)
for reporef in source.get_reporef_list():
self._reporef(reporef, 1)
# break
self._note_references(source.get_note_list(), 1)
self._change(source.get_change_time(), 1)
def _notes(self):
"""
Write out the list of notes, sorting by Gramps ID.
"""
self.set_text(_("Writing notes"))
note_cnt = 0
sorted_list = sort_handles_by_id(self.dbase.get_note_handles(),
self.dbase.get_note_from_handle)
for note_handle in [hndl[1] for hndl in sorted_list]:
# the following makes the progress bar a bit smoother
if not note_cnt % NOTES_PER_PERSON:
self.update()
note_cnt += 1
note = self.dbase.get_note_from_handle(note_handle)
if note is None:
continue
self._note_record(note)
def _note_record(self, note):
"""
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if note:
self._writeln(0, '@%s@' % note.get_gramps_id(),
'NOTE ' + note.get())
def _repos(self):
"""
Write out the list of repositories, sorting by Gramps ID.
REPOSITORY_RECORD:=
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
self.set_text(_("Writing repositories"))
sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(),
self.dbase.get_repository_from_handle)
# GEDCOM only allows for a single repository per source
for (repo_id, handle) in sorted_list:
self.update()
repo = self.dbase.get_repository_from_handle(handle)
if repo is None:
continue
self._writeln(0, '@%s@' % repo_id, 'REPO')
if repo.get_name():
self._writeln(1, 'NAME', repo.get_name())
for addr in repo.get_address_list():
self.__write_addr(1, addr)
if addr.get_phone():
self._writeln(1, 'PHON', addr.get_phone())
for url in repo.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(1, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(1, 'WWW', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(1, 'FAX', url.get_path())
self._note_references(repo.get_note_list(), 1)
def _reporef(self, reporef, level):
"""
n REPO [ @XREF:REPO@ | <NULL>] {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 CALN <SOURCE_CALL_NUMBER> {0:M}
+2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
"""
if reporef.ref is None:
return
repo = self.dbase.get_repository_from_handle(reporef.ref)
if repo is None:
return
repo_id = repo.get_gramps_id()
self._writeln(level, 'REPO', '@%s@' % repo_id)
self._note_references(reporef.get_note_list(), level + 1)
if reporef.get_call_number():
self._writeln(level + 1, 'CALN', reporef.get_call_number())
if reporef.get_media_type():
self._writeln(level + 2, 'MEDI', str(reporef.get_media_type()))
def _person_event_ref(self, key, event_ref):
"""
Write out the BIRTH and DEATH events for the person.
"""
if event_ref:
event = self.dbase.get_event_from_handle(event_ref.ref)
if event_has_subordinate_data(event, event_ref):
self._writeln(1, key)
else:
self._writeln(1, key, 'Y')
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
self._dump_event_stats(event, event_ref)
def _change(self, timeval, level):
"""
CHANGE_DATE:=
n CHAN {1:1}
+1 DATE <CHANGE_DATE> {1:1}
+2 TIME <TIME_VALUE> {0:1}
+1 <<NOTE_STRUCTURE>> # not used
"""
self._writeln(level, 'CHAN')
time_val = time.gmtime(timeval)
self._writeln(level + 1, 'DATE', '%d %s %d' % (
time_val[2], libgedcom.MONTH[time_val[1]], time_val[0]))
self._writeln(level + 2, 'TIME', '%02d:%02d:%02d' % (
time_val[3], time_val[4], time_val[5]))
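# Illustrative output when called with level=1 (comment only):
#   1 CHAN
#   2 DATE 15 AUG 2015
#   3 TIME 12:30:05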
def _dump_event_stats(self, event, event_ref):
"""
Write the event details for the event, using the event and event
reference information.
GEDCOM does not make a distinction between the two.
"""
dateobj = event.get_date_object()
self._date(2, dateobj)
if self._datewritten:
# write out TIME if present
times = [attr.get_value() for attr in event.get_attribute_list()
if int(attr.get_type()) == AttributeType.TIME]
# Not legal, but inserted by PhpGedView
if len(times) > 0:
self._writeln(3, 'TIME', times[0])
place = None
if event.get_place_handle():
place = self.dbase.get_place_from_handle(event.get_place_handle())
self._place(place, dateobj, 2)
for attr in event.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.CAUSE:
self._writeln(2, 'CAUS', attr.get_value())
elif attr_type == AttributeType.AGENCY:
self._writeln(2, 'AGNC', attr.get_value())
elif attr_type == _("Phone"):
self._writeln(2, 'PHON', attr.get_value())
elif attr_type == _("FAX"):
self._writeln(2, 'FAX', attr.get_value())
elif attr_type == _("EMAIL"):
self._writeln(2, 'EMAIL', attr.get_value())
elif attr_type == _("WWW"):
self._writeln(2, 'WWW', attr.get_value())
for attr in event_ref.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.AGE:
self._writeln(2, 'AGE', attr.get_value())
elif attr_type == AttributeType.FATHER_AGE:
self._writeln(2, 'HUSB')
self._writeln(3, 'AGE', attr.get_value())
elif attr_type == AttributeType.MOTHER_AGE:
self._writeln(2, 'WIFE')
self._writeln(3, 'AGE', attr.get_value())
self._note_references(event.get_note_list(), 2)
self._source_references(event.get_citation_list(), 2)
self._photos(event.get_media_list(), 2)
if place:
self._photos(place.get_media_list(), 2)
def write_ord(self, lds_ord, index):
"""
LDS_INDIVIDUAL_ORDINANCE:=
[
n [ BAPL | CONL ] {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M} p.39
|
n ENDL {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
|
n SLGC {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 FAMC @<XREF:FAM>@ {1:1}
+1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
]
"""
self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()])
self._date(index + 1, lds_ord.get_date_object())
if lds_ord.get_family_handle():
family_handle = lds_ord.get_family_handle()
family = self.dbase.get_family_from_handle(family_handle)
if family:
self._writeln(index + 1, 'FAMC', '@%s@' %
family.get_gramps_id())
if lds_ord.get_temple():
self._writeln(index + 1, 'TEMP', lds_ord.get_temple())
if lds_ord.get_place_handle():
place = self.dbase.get_place_from_handle(
lds_ord.get_place_handle())
self._place(place, lds_ord.get_date_object(), 2)
if lds_ord.get_status() != LdsOrd.STATUS_NONE:
self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()])
self._note_references(lds_ord.get_note_list(), index + 1)
self._source_references(lds_ord.get_citation_list(), index + 1)
def _date(self, level, date):
"""
Write the 'DATE' GEDCOM token, along with the date in GEDCOM's
expected format.
"""
self._datewritten = True
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = date.get_quality()
if quality in libgedcom.DATE_QUALITY:
qual_text = libgedcom.DATE_QUALITY[quality] + " "
else:
qual_text = ""
if mod == Date.MOD_SPAN:
val = "%sFROM %s TO %s" % (
qual_text,
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
elif mod == Date.MOD_RANGE:
val = "%sBET %s AND %s" % (
qual_text,
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
else:
val = libgedcom.make_gedcom_date(start, cal, mod, quality)
self._writeln(level, 'DATE', val)
elif date.get_text():
self._writeln(level, 'DATE', date.get_text())
else:
self._datewritten = False
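# Illustrative output (comment only): a span renders as
# "2 DATE FROM 1 JAN 1900 TO 5 MAR 1910", a range as
# "2 DATE BET 1 JAN 1900 AND 5 MAR 1910", and a plain date is whatever
# libgedcom.make_gedcom_date produces, e.g. "2 DATE 1 JAN 1900".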
def _person_name(self, name, attr_nick):
"""
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
gedcom_name = name.get_gedcom_name()
firstname = name.get_first_name().strip()
surns = []
surprefs = []
for surn in name.get_surname_list():
surns.append(surn.get_surname().replace('/', '?'))
if surn.get_connector():
#we store connector with the surname
surns[-1] = surns[-1] + ' ' + surn.get_connector()
surprefs.append(surn.get_prefix().replace('/', '?'))
surname = ', '.join(surns)
surprefix = ', '.join(surprefs)
suffix = name.get_suffix()
title = name.get_title()
nick = name.get_nick_name()
if nick.strip() == '':
nick = attr_nick
self._writeln(1, 'NAME', gedcom_name)
if int(name.get_type()) == NameType.BIRTH:
pass
elif int(name.get_type()) == NameType.MARRIED:
self._writeln(2, 'TYPE', 'married')
elif int(name.get_type()) == NameType.AKA:
self._writeln(2, 'TYPE', 'aka')
else:
self._writeln(2, 'TYPE', name.get_type().xml_str())
if firstname:
self._writeln(2, 'GIVN', firstname)
if surprefix:
self._writeln(2, 'SPFX', surprefix)
if surname:
self._writeln(2, 'SURN', surname)
if name.get_suffix():
self._writeln(2, 'NSFX', suffix)
if name.get_title():
self._writeln(2, 'NPFX', title)
if nick:
self._writeln(2, 'NICK', nick)
self._source_references(name.get_citation_list(), 2)
self._note_references(name.get_note_list(), 2)
def _source_ref_record(self, level, citation_handle):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1}
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1}
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1}
+2 ROLE <ROLE_IN_EVENT> {0:1}
+1 DATA {0:1}
+2 DATE <ENTRY_RECORDING_DATE> {0:1}
+2 TEXT <TEXT_FROM_SOURCE> {0:M}
+3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
"""
citation = self.dbase.get_citation_from_handle(citation_handle)
src_handle = citation.get_reference_handle()
if src_handle is None:
return
src = self.dbase.get_source_from_handle(src_handle)
if src is None:
return
# Reference to the source
self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id())
if citation.get_page() != "":
# PAGE <WHERE_WITHIN_SOURCE> can not have CONC lines.
# WHERE_WITHIN_SOURCE:= {Size=1:248}
# Maximize line to 248 and set limit to 248, for no line split
self._writeln(level + 1, 'PAGE', citation.get_page()[0:248],
limit=248)
conf = min(citation.get_confidence_level(),
Citation.CONF_VERY_HIGH)
if conf != Citation.CONF_NORMAL and conf != -1:
self._writeln(level + 1, "QUAY", QUALITY_MAP[conf])
if not citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
self._date(level + 2, citation.get_date_object())
if len(citation.get_note_list()) > 0:
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n for n in note_list
if n.get_type() == NoteType.SOURCE_TEXT]
if note_list:
ref_text = note_list[0].get()
else:
ref_text = ""
if ref_text != "" and citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
if ref_text != "":
self._writeln(level + 2, "TEXT", ref_text)
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n.handle for n in note_list
if n and n.get_type() != NoteType.SOURCE_TEXT]
self._note_references(note_list, level + 1)
self._photos(citation.get_media_list(), level + 1)
even = None
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN":
even = srcattr.value
self._writeln(level + 1, "EVEN", even)
break
if even:
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN:ROLE":
self._writeln(level + 2, "ROLE", srcattr.value)
break
def _photo(self, photo, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
photo_obj_id = photo.get_reference_handle()
photo_obj = self.dbase.get_media_from_handle(photo_obj_id)
if photo_obj:
mime = photo_obj.get_mime_type()
form = MIME2GED.get(mime, mime)
path = media_path_full(self.dbase, photo_obj.get_path())
if not os.path.isfile(path):
return
self._writeln(level, 'OBJE')
if form:
self._writeln(level + 1, 'FORM', form)
self._writeln(level + 1, 'TITL', photo_obj.get_description())
self._writeln(level + 1, 'FILE', path, limit=255)
self._note_references(photo_obj.get_note_list(), level + 1)
def _place(self, place, dateobj, level):
"""
PLACE_STRUCTURE:=
n PLAC <PLACE_NAME> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used
+2 TYPE <PHONETIC_TYPE> {1:1}
+1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used
+2 TYPE <ROMANIZED_TYPE> {1:1}
+1 MAP {0:1}
+2 LATI <PLACE_LATITUDE> {1:1}
+2 LONG <PLACE_LONGITUDE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
if place is None:
return
place_name = _pd.display(self.dbase, place, dateobj)
self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120)
longitude = place.get_longitude()
latitude = place.get_latitude()
if longitude and latitude:
(latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM")
if longitude and latitude:
self._writeln(level + 1, "MAP")
self._writeln(level + 2, 'LATI', latitude)
self._writeln(level + 2, 'LONG', longitude)
# The Gedcom standard shows that an optional address structure can
# be written out in the event detail.
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
location = get_main_location(self.dbase, place)
street = location.get(PlaceType.STREET)
locality = location.get(PlaceType.LOCALITY)
city = location.get(PlaceType.CITY)
state = location.get(PlaceType.STATE)
country = location.get(PlaceType.COUNTRY)
postal_code = place.get_code()
if street or locality or city or state or postal_code or country:
self._writeln(level, "ADDR", street)
if street:
self._writeln(level + 1, 'ADR1', street)
if locality:
self._writeln(level + 1, 'ADR2', locality)
if city:
self._writeln(level + 1, 'CITY', city)
if state:
self._writeln(level + 1, 'STAE', state)
if postal_code:
self._writeln(level + 1, 'POST', postal_code)
if country:
self._writeln(level + 1, 'CTRY', country)
self._note_references(place.get_note_list(), level + 1)
def __write_addr(self, level, addr):
"""
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
This is done along the lines suggested by Tamura Jones in
http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
"GEDCOM writers should always use the structured address format,
and use it for all addresses, including the submitter address and
their own corporate address." "Vendors that want their product to pass
even the strictest GEDCOM validation, should include export to the old
free-form format..." [This goes on to say the free-form should be an
option, but we have not made it an option in Gramps].
@param level: The level number for the ADDR tag
@type level: Integer
@param addr: The location or address
@type addr: [a super-type of] LocationBase
"""
if addr.get_street() or addr.get_locality() or addr.get_city() or \
addr.get_state() or addr.get_postal_code() or addr.get_country():
self._writeln(level, 'ADDR', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'CONT', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CONT', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'CONT', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'CONT', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CONT', addr.get_country())
if addr.get_street():
self._writeln(level + 1, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CTRY', addr.get_country())
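# Illustrative output for a full address (comment only; values are made
# up). Both the free-form CONT lines and the structured pieces appear:
#   1 ADDR 123 Main Street
#   2 CONT Springfield
#   2 CONT IL
#   2 CONT 12345
#   2 CONT USA
#   2 ADR1 123 Main Street
#   2 CITY Springfield
#   2 STAE IL
#   2 POST 12345
#   2 CTRY USA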
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
"""
External interface used to register with the plugin system.
"""
ret = False
try:
ged_write = GedcomWriter(database, user, option_box)
ret = ged_write.write_gedcom_file(filename)
except IOError as msg:
msg2 = _("Could not create %s") % filename
user.notify_error(msg2, str(msg))
except DatabaseError as msg:
user.notify_db_error("%s\n%s" % (_("GEDCOM Export failed"), str(msg)))
return ret
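# Minimal usage sketch (illustrative; the filename and `user` object are
# hypothetical -- in practice the plugin system supplies the database and
# a user object providing .callback and .notify_error):
#   ok = export_data(database, '/tmp/family.ged', user)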
|
jralls/gramps
|
gramps/plugins/export/exportgedcom.py
|
Python
|
gpl-2.0
| 59,838
|
[
"Brian"
] |
98bfed05e60024207f3ef90759814dd138f7ddbec1dd600d16227717257d710e
|
'''Class for version 2 of pattern radar data in
`netCDF <https://www.unidata.ucar.edu/software/netcdf/>`_-format
'''
# Python modules
from datetime import datetime
from netCDF4 import Dataset
# MasterModule
from .main_radar import Radar
from .radar_data import RadarData
class PatternRadarV2(Radar):
'''Class for version 2 of Pattern radar data in `netCDF <https://www.unidata.ucar.edu/software/netcdf/>`_-format
This class is designed for the new processing (started in May 2017)
of data coming from the 'Precipitation and Attenuation Estimates
from a High-Resolution Weather Radar Network' (PATTERN) in
`netCDF <https://www.unidata.ucar.edu/software/netcdf/>`_-format.
It is a subclass of the more general :any:`Radar` class. Using this
class, a newly created PATTERN data file can be read in.
Note:
There is no distinction between starting and ending time in
version 2 of the pattern radar files.
Attributes:
name (:any:`str`): Name of operating institute. 'PATTERN' in
this case.
offset (:any:`int`): Angle, by which the pattern radar is
rotated.
data (:any:`RadarData`): Used to save all kind of general radar
data and meta data.
'''
def __init__(self, radar_par):
'''Initialization of object
Saves attributes to the object and calls the
:any:`Radar.__init__`-method.
Args:
radar_par (dict): Radar parameters, e.g. name of file,
minute to be plotted, processing step, factor to
increase azimuth resolution, offset of radars azimuth
angle.
'''
# Call init method of super class
super().__init__(radar_par)
# Save attributes
self.name = 'PATTERN'
self.offset = radar_par['offset']
def read_file(self, radar_par):
'''Read in data
Reads pattern radar data and saves data to object. Only
attributes needed for my calculations are read in. If more
information about the file and the attributes is wished, check
out the
`netCDF <https://www.unidata.ucar.edu/software/netcdf/>`_-file
with ncview or ncdump -h.
Args:
radar_par (dict): Radar parameters, e.g. name of file,
minute to be plotted, processing step, factor to
increase azimuth resolution, offset of radars azimuth
angle.
'''
minute = radar_par['minute']
# Create a RadarData object to generalize the radar properties
radar_data = RadarData()
# Open data file
nc = Dataset(radar_par['file'], mode='r')
# lon/lat coords of site
if nc.longitude[-1] == 'E':
radar_data.lon_site = float(nc.longitude[:-1])
elif nc.longitude[-1] == 'W':
radar_data.lon_site = -1*float(nc.longitude[:-1])
if nc.latitude[-1] == 'N':
radar_data.lat_site = float(nc.latitude[:-1])
elif nc.latitude[-1] == 'S':
radar_data.lat_site = -1*float(nc.latitude[:-1])
# Elevation of radar beam
radar_data.ele = nc.elevation
# Number of azimuth rays
radar_data.azi_rays = nc.dimensions['ang'].size
# Number of range bins
radar_data.r_bins = nc.dimensions['dist'].size
# Starting value of azimuth angle
radar_data.azi_start = (
(nc.variables['Azimuth'][0] + self.offset + 360) % 360
)
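# e.g. a first ray at 350 deg with an offset of 20 deg wraps to
# (350 + 20 + 360) % 360 = 10 deg (values illustrative)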
# Starting value of range
radar_data.r_start = float(nc.variables['Distance'][0])
# Azimuth angle steps between two measurements
radar_data.azi_steps = (
(nc.variables['Azimuth'][1]
- nc.variables['Azimuth'][0]
+ 360)
% 360
)
# Steps between two measurements in range
radar_data.r_steps = (
nc.variables['Distance'][1] - nc.variables['Distance'][0]
)
# Array of measured reflectivity
radar_data.refl = (
nc.variables['Att_Corr_Xband_Reflectivity'][:][int(minute * 2)]
)
# Time at which radar scan started
time_start = nc.variables['Time'][int(minute*2)]
radar_data.time_start = datetime.utcfromtimestamp(time_start)
# Time at which radar scan ended
time_end = nc.variables['Time'][int(minute*2)]
radar_data.time_end = datetime.utcfromtimestamp(time_end)
# Save the data to Pattern object
self.data = radar_data
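# Minimal usage sketch (hypothetical file name and parameter values; the
# full set of keys expected in radar_par depends on Radar.__init__):
#   radar_par = {'file': 'pattern_radar_v2.nc', 'minute': 10, 'offset': 0}
#   radar = PatternRadarV2(radar_par)
#   radar.read_file(radar_par)
#   print(radar.data.time_start, radar.data.refl.shape)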
|
GroovyGregor/master_thesis
|
python/ModuleSetup/MasterModule/pattern_radar_v2.py
|
Python
|
gpl-3.0
| 5,025
|
[
"NetCDF"
] |
846856455eef10c29c8b55972df5976f59e482dbb87e10d508617554a116f7e8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# managejobs - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.managejobs import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/managejobs.py
|
Python
|
gpl-2.0
| 1,108
|
[
"Brian"
] |
e46966a4ff20da1e911ba42f8a8d6a411bd22d1aa5b88f72d61fdcff26d9eec1
|
"""
=============================================
SNR estimation for Diffusion-Weighted Images
=============================================
Computing the Signal-to-Noise-Ratio (SNR) of DW images is still an open question,
as SNR depends on the white matter structure of interest as well as
the gradient direction corresponding to each DWI.
In classical MRI, SNR can be defined as the ratio of the mean
of the signal divided by the standard deviation of the
underlying Gaussian noise, that is SNR = mean(signal) / std(noise).
The noise standard deviation can be
computed from the background in any of the DW images. How do we compute
the mean of the signal, and what signal?
The strategy here is to compute a 'worst-case' SNR for DWI. Several white matter
structures such as the corpus callosum (CC), corticospinal tract (CST), or
the superior longitudinal fasciculus (SLF) can be easily identified from
the colored-FA (cfa) map. In this example, we will use voxels from the CC,
which have the characteristic of being highly RED in the cfa map since they are mainly oriented in
the left-right direction. We know that the DW image
closest to the x-direction will be the one with the most attenuated diffusion signal.
This is the strategy adopted in several recent papers (see [1]_ and [2]_). It gives a good
indication of the quality of the DWI data.
First, we compute the tensor model in a brain mask (see the DTI example for more explanation).
"""
from __future__ import division, print_function
import nibabel as nib
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.get_affine()
print('Computing brain mask...')
b0_mask, mask = median_otsu(data)
print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)
"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.
Then, as a safeguard, we keep just the RED voxels inside the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.
The following lines perform these two operations and then save the computed mask.
"""
print('Computing worst-case/best-case SNR using the corpus callosum...')
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])
mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff
CC_box[bounds_min[0]:bounds_max[0],
bounds_min[1]:bounds_max[1],
bounds_min[2]:bounds_max[2]] = 1
mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
threshold, return_cfa=True)
cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')
import matplotlib.pyplot as plt
region = 40
fig = plt.figure('Corpus callosum segmentation')
plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))
plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')
"""
.. figure:: CC_segmentation.png
:align: center
"""
"""Now that we are happy with our crude CC mask that selected voxels in the x-direction,
we can use all the voxels to estimate the mean signal in this region.
"""
mean_signal = np.mean(data[mask_cc_part], axis=0)
"""Now, we need a good background estimation. We will re-use the brain mask
computed before and invert it to catch the outside of the brain. This could
also be determined manually with a ROI in the background.
[Warning: Certain MR manufacturers mask out the outside of the brain with 0's.
One thus has to be careful how the noise ROI is defined].
"""
from scipy.ndimage.morphology import binary_dilation
mask_noise = binary_dilation(mask, iterations=10)
mask_noise[..., :mask_noise.shape[-1]//2] = 1
mask_noise = ~mask_noise
mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
nib.save(mask_noise_img, 'mask_noise.nii.gz')
noise_std = np.std(data[mask_noise, :])
print('Noise standard deviation sigma= ', noise_std)
"""We can now compute the SNR for each DWI. For example, report SNR
for DW images with gradient direction that lies the closest to
the X, Y and Z axes.
"""
# Exclude null bvecs from the search
idx = np.sum(gtab.bvecs, axis=-1) == 0
gtab.bvecs[idx] = np.inf
axis_X = np.argmin(np.sum((gtab.bvecs-np.array([1, 0, 0]))**2, axis=-1))
axis_Y = np.argmin(np.sum((gtab.bvecs-np.array([0, 1, 0]))**2, axis=-1))
axis_Z = np.argmin(np.sum((gtab.bvecs-np.array([0, 0, 1]))**2, axis=-1))
for direction in [0, axis_X, axis_Y, axis_Z]:
SNR = mean_signal[direction]/noise_std
if direction == 0:
print("SNR for the b=0 image is :", SNR)
else:
print("SNR for direction", direction, " ", gtab.bvecs[direction], "is :", SNR)
"""SNR for the b=0 image is : ''42.0695455758''"""
"""SNR for direction 58 [ 0.98875 0.1177 -0.09229] is : ''5.46995373635''"""
"""SNR for direction 57 [-0.05039 0.99871 0.0054406] is : ''23.9329492871''"""
"""SNR for direction 126 [-0.11825 -0.039925 0.99218 ] is : ''23.9965694823''"""
"""
Since the CC is aligned with the X axis, the lowest SNR is for that gradient
direction. In comparison, the DW images in
the perpendicular Y and Z axes have a high SNR. The b0 still exhibits the highest SNR,
since there is no signal attenuation.
Hence, we can say the Stanford diffusion
data has a 'worst-case' SNR of approximately 5, a
'best-case' SNR of approximately 24, and a SNR of 42 on the b0 image.
"""
"""
References:
.. [1] Descoteaux, M., Deriche, R., Le Bihan, D., Mangin, J.-F., and Poupon, C.
Multiple q-shell diffusion propagator imaging.
Medical image analysis, 15(4), 603, 2011.
.. [2] Jones, D. K., Knosche, T. R., & Turner, R.
White Matter Integrity, Fiber Count, and Other Fallacies: The Dos and Don'ts of Diffusion MRI.
NeuroImage, 73, 239, 2013.
"""
|
StongeEtienne/dipy
|
doc/examples/snr_in_cc.py
|
Python
|
bsd-3-clause
| 6,528
|
[
"Gaussian"
] |
4084a62dc52b8e3d44afd71b3a11879389099a4e33d6eb8243fa050b42c41a02
|
# -*- coding: utf-8 -*-
import unittest
from .. import fit2d
import numpy as np
import pylab
class test_fit2d(unittest.TestCase):
def test_leastsq(self):
nx = 501
ny = 401
y, x = np.indices((ny, nx))
x0 = 10
y0 = ny // 2
sx = nx // 4
sy = ny // 4
rho = 0.5
A = 1000.0
p1 = np.array([x0, y0, sx, sy, rho, A], dtype=np.float32)
x0, y0, sx, sy, rho, A = tuple(p1)
data = fit2d.gaussian(x, y, x0, y0, sx, sy, rho, A)
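# fit2d.gaussian is presumably a correlated bivariate Gaussian, roughly
# A * exp(-(u**2 - 2*rho*u*v + v**2) / (2*(1 - rho**2))) with
# u = (x - x0)/sx and v = (y - y0)/sy; the exact form is defined by fit2d.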
# self.plot(data)
p2, _ = fit2d.fitgaussian(x, y, data)
np.testing.assert_allclose(p1, p2)
def plot(self, img):
pylab.figure(1)
pylab.subplot(111)
pylab.imshow(img, origin="lower", interpolation="nearest")
pylab.pause(0.1)
input("Press enter to continue...")
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_fit2d("test_leastsq"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|
woutdenolf/spectrocrunch
|
spectrocrunch/math/tests/test_fit2d.py
|
Python
|
mit
| 1,206
|
[
"Gaussian"
] |
495ccc9defe241333c0990ce7284fa22819e925fce7d9ac0b7fb0e58a63330cd
|
import tempfile
import os.path
from stat import S_IXUSR
from os import makedirs, stat, symlink, chmod, environ
from shutil import rmtree
from galaxy.tools.deps import DependencyManager, INDETERMINATE_DEPENDENCY
from galaxy.tools.deps.resolvers.galaxy_packages import GalaxyPackageDependency
from galaxy.tools.deps.resolvers.modules import ModuleDependencyResolver, ModuleDependency
from galaxy.util.bunch import Bunch
from contextlib import contextmanager
from subprocess import Popen, PIPE
def test_tool_dependencies():
# Setup directories
with __test_base_path() as base_path:
for name, version, sub in [ ( "dep1", "1.0", "env.sh" ), ( "dep1", "2.0", "bin" ), ( "dep2", "1.0", None ) ]:
if sub == "bin":
p = os.path.join( base_path, name, version, "bin" )
else:
p = os.path.join( base_path, name, version )
try:
makedirs( p )
except OSError:
pass
if sub == "env.sh":
__touch( os.path.join( p, "env.sh" ) )
dm = DependencyManager( default_base_path=base_path )
dependency = dm.find_dep( "dep1", "1.0" )
assert dependency.script == os.path.join( base_path, 'dep1', '1.0', 'env.sh' )
assert dependency.path == os.path.join( base_path, 'dep1', '1.0' )
assert dependency.version == "1.0"
dependency = dm.find_dep( "dep1", "2.0" )
assert dependency.script == None
assert dependency.path == os.path.join( base_path, 'dep1', '2.0' )
assert dependency.version == "2.0"
## Test default versions
symlink( os.path.join( base_path, 'dep1', '2.0'), os.path.join( base_path, 'dep1', 'default' ) )
dependency = dm.find_dep( "dep1", None )
assert dependency.version == "2.0"
## Test that default resolution falls back on the default package
## dependency when using the default resolver.
dependency = dm.find_dep( "dep1", "2.1" )
assert dependency.version == "2.0" # 2.0 is defined as default_version
TEST_REPO_USER = "devteam"
TEST_REPO_NAME = "bwa"
TEST_REPO_CHANGESET = "12abcd41223da"
TEST_VERSION = "0.5.9"
def test_toolshed_set_environment_requirements():
with __test_base_path() as base_path:
test_repo = __build_test_repo('set_environment')
dm = DependencyManager( default_base_path=base_path )
env_settings_dir = os.path.join(base_path, "environment_settings", TEST_REPO_NAME, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET)
os.makedirs(env_settings_dir)
dependency = dm.find_dep( TEST_REPO_NAME, version=None, type='set_environment', installed_tool_dependencies=[test_repo] )
assert dependency.version == None
assert dependency.script == os.path.join(env_settings_dir, "env.sh")
def test_toolshed_package_requirements():
with __test_base_path() as base_path:
test_repo = __build_test_repo('package', version=TEST_VERSION)
dm = DependencyManager( default_base_path=base_path )
package_dir = __build_ts_test_package(base_path)
dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] )
assert dependency.version == TEST_VERSION
assert dependency.script == os.path.join(package_dir, "env.sh")
def test_toolshed_tools_fallback_on_manual_dependencies():
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
test_repo = __build_test_repo('package', version=TEST_VERSION)
env_path = __setup_galaxy_package_dep(base_path, "dep1", "1.0")
dependency = dm.find_dep( "dep1", version="1.0", type='package', installed_tool_dependencies=[test_repo] )
assert dependency.version == "1.0"
assert dependency.script == env_path
def test_toolshed_greater_precedence():
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
test_repo = __build_test_repo('package', version=TEST_VERSION)
ts_package_dir = __build_ts_test_package(base_path)
gx_env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION)
ts_env_path = os.path.join(ts_package_dir, "env.sh")
dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] )
assert dependency.script != gx_env_path # Not the galaxy path; the tool shed path should be used.
assert dependency.script == ts_env_path
def __build_ts_test_package(base_path, script_contents=''):
package_dir = os.path.join(base_path, TEST_REPO_NAME, TEST_VERSION, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET)
__touch(os.path.join(package_dir, 'env.sh'), script_contents)
return package_dir
def test_module_dependency_resolver():
with __test_base_path() as temp_directory:
module_script = os.path.join(temp_directory, "modulecmd")
__write_script(module_script, '''#!/bin/sh
cat %s/example_output 1>&2;
''' % temp_directory)
with open(os.path.join(temp_directory, "example_output"), "w") as f:
# Subset of `module avail` output from the MSI cluster.
f.write('''
-------------------------- /soft/modules/modulefiles ---------------------------
JAGS/3.2.0-gcc45
JAGS/3.3.0-gcc4.7.2
ProbABEL/0.1-3
ProbABEL/0.1-9e
R/2.12.2
R/2.13.1
R/2.14.1
R/2.15.0
R/2.15.1
R/3.0.1(default)
abokia-blast/2.0.2-130524/ompi_intel
abokia-blast/2.0.2-130630/ompi_intel
--------------------------- /soft/intel/modulefiles ----------------------------
advisor/2013/update1 intel/11.1.075 mkl/10.2.1.017
advisor/2013/update2 intel/11.1.080 mkl/10.2.5.035
advisor/2013/update3 intel/12.0 mkl/10.2.7.041
''')
resolver = ModuleDependencyResolver(None, modulecmd=module_script)
module = resolver.resolve( name="R", version=None, type="package" )
assert module.module_name == "R"
assert module.module_version is None
module = resolver.resolve( name="R", version="3.0.1", type="package" )
assert module.module_name == "R"
assert module.module_version == "3.0.1"
module = resolver.resolve( name="R", version="3.0.4", type="package" )
assert module == INDETERMINATE_DEPENDENCY
def test_module_dependency():
with __test_base_path() as temp_directory:
## Create a mock modulecmd script that just exports a variable the way
## modulecmd sh load would, but also validates that the correct module
## name and version are coming through.
mock_modulecmd = os.path.join(temp_directory, 'modulecmd')
__write_script(mock_modulecmd, '''#!/bin/sh
if [ $3 != "foomodule/1.0" ];
then
exit 1
fi
echo 'FOO="bar"'
''')
resolver = Bunch(modulecmd=mock_modulecmd)
dependency = ModuleDependency(resolver, "foomodule", "1.0")
__assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) )
def __write_script(path, contents):
with open(path, 'w') as f:
f.write(contents)
st = stat(path)
chmod(path, st.st_mode | S_IXUSR)
def test_galaxy_dependency_object_script():
with __test_base_path() as base_path:
## Create an env.sh file that just exports the variable FOO and verify
## that shell_commands exports it correctly.
env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION, "export FOO=\"bar\"")
dependency = GalaxyPackageDependency(env_path, os.path.dirname(env_path), TEST_VERSION)
__assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) )
def test_shell_commands_built():
## Test that dependency manager builds valid shell commands for a list of
## requirements.
with __test_base_path() as base_path:
dm = DependencyManager( default_base_path=base_path )
__setup_galaxy_package_dep( base_path, TEST_REPO_NAME, TEST_VERSION, contents="export FOO=\"bar\"" )
mock_requirements = [ Bunch(type="package", version=TEST_VERSION, name=TEST_REPO_NAME ) ]
commands = dm.dependency_shell_commands( mock_requirements )
__assert_foo_exported( commands )
def __assert_foo_exported( commands ):
command = ["bash", "-c", "%s; echo \"$FOO\"" % "".join(commands)]
process = Popen(command, stdout=PIPE)
output = process.communicate()[0].strip()
assert output == b'bar', "Command %s exports FOO as %s, not bar" % (command, output)
def __setup_galaxy_package_dep(base_path, name, version, contents=""):
dep_directory = os.path.join( base_path, name, version )
env_path = os.path.join( dep_directory, "env.sh" )
__touch( env_path, contents )
return env_path
def __touch( fname, data=None ):
dirname = os.path.dirname( fname )
if not os.path.exists( dirname ):
makedirs( dirname )
f = open( fname, 'w' )
try:
if data:
f.write( data )
finally:
f.close()
def __build_test_repo(type, version=None):
return Bunch(
owner=TEST_REPO_USER,
name=TEST_REPO_NAME,
type=type,
version=version,
tool_shed_repository=Bunch(
owner=TEST_REPO_USER,
name=TEST_REPO_NAME,
installed_changeset_revision=TEST_REPO_CHANGESET
)
)
@contextmanager
def __test_base_path():
base_path = tempfile.mkdtemp()
try:
yield base_path
finally:
rmtree(base_path)
def test_parse():
with __parse_resolvers('''<dependency_resolvers>
<tool_shed_packages />
<galaxy_packages />
</dependency_resolvers>
''') as dependency_resolvers:
assert 'ToolShed' in dependency_resolvers[0].__class__.__name__
assert 'Galaxy' in dependency_resolvers[1].__class__.__name__
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
</dependency_resolvers>
''') as dependency_resolvers:
assert 'Galaxy' in dependency_resolvers[0].__class__.__name__
assert 'ToolShed' in dependency_resolvers[1].__class__.__name__
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
<galaxy_packages versionless="true" />
</dependency_resolvers>
''') as dependency_resolvers:
assert not dependency_resolvers[0].versionless
assert dependency_resolvers[2].versionless
with __parse_resolvers('''<dependency_resolvers>
<galaxy_packages />
<tool_shed_packages />
<galaxy_packages base_path="/opt/galaxy/legacy/"/>
</dependency_resolvers>
''') as dependency_resolvers:
# Resolvers without an explicit base_path share the default base path.
assert dependency_resolvers[0].base_path == dependency_resolvers[1].base_path
# Can specify custom base path...
assert dependency_resolvers[2].base_path == "/opt/galaxy/legacy"
# ... that is different from the default.
assert dependency_resolvers[0].base_path != dependency_resolvers[2].base_path
def test_uses_tool_shed_dependencies():
with __dependency_manager('''<dependency_resolvers>
<galaxy_packages />
</dependency_resolvers>
''') as dm:
assert not dm.uses_tool_shed_dependencies()
with __dependency_manager('''<dependency_resolvers>
<tool_shed_packages />
</dependency_resolvers>
''') as dm:
assert dm.uses_tool_shed_dependencies()
def test_config_module_defaults():
with __parse_resolvers('''<dependency_resolvers>
<modules prefetch="false" />
</dependency_resolvers>
''') as dependency_resolvers:
module_resolver = dependency_resolvers[0]
assert module_resolver.module_checker.__class__.__name__ == "AvailModuleChecker"
def test_config_modulepath():
# Test that an explicit modulepath attribute is read and split on ':'.
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" modulepath="/opt/modules/modulefiles:/usr/local/modules/modulefiles" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"]
def test_config_MODULEPATH():
# Test reads and splits MODULEPATH if modulepath is not specified.
with __environ({"MODULEPATH": "/opt/modules/modulefiles:/usr/local/modules/modulefiles"}):
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"]
def test_config_MODULESHOME():
# Test falls back to reading MODULESHOME if neither modulepath nor
# MODULEPATH is specified.
with __environ({"MODULESHOME": "/opt/modules"}, remove="MODULEPATH"):
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" />
</dependency_resolvers>
''') as dependency_resolvers:
assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles"]
def test_config_module_directory_searcher():
with __parse_resolvers('''<dependency_resolvers>
<modules find_by="directory" modulepath="/opt/Modules/modulefiles" />
</dependency_resolvers>
''') as dependency_resolvers:
module_resolver = dependency_resolvers[0]
assert module_resolver.module_checker.directories == ["/opt/Modules/modulefiles"]
@contextmanager
def __environ(values, remove=[]):
"""
Modify the environment for a test, adding/updating values in dict `values` and
removing any environment variables mentioned in list `remove`.
"""
new_keys = set(values.keys()) - set(environ.keys())
old_environ = environ.copy()
try:
environ.update(values)
for to_remove in remove:
try:
del environ[to_remove]
except KeyError:
pass
yield
finally:
environ.update(old_environ)
for key in new_keys:
del environ[key]
@contextmanager
def __parse_resolvers(xml_content):
with __dependency_manager(xml_content) as dm:
yield dm.dependency_resolvers
@contextmanager
def __dependency_manager(xml_content):
with __test_base_path() as base_path:
f = tempfile.NamedTemporaryFile()
f.write(xml_content.encode("utf-8"))
f.flush()
dm = DependencyManager( default_base_path=base_path, conf_file=f.name )
yield dm
|
ssorgatem/pulsar
|
test/test_tool_deps.py
|
Python
|
apache-2.0
| 14,530
|
[
"BLAST",
"BWA",
"Galaxy"
] |
885c469b6766f649a5fda9e1e74666fb0ded9a20bcd546613ca96b6da2c3ac45
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************************************
**espressopp.interaction.LennardJonesExpand**
*******************************************************
.. math::
V(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r - \Delta} \right)^{12} -
\left( \frac{\sigma}{r - \Delta} \right)^{6} \right]
.. function:: espressopp.interaction.LennardJonesExpand(epsilon, sigma, delta, cutoff, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param delta: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type delta: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesExpand(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesExpand.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesExpand.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesExpand(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesExpand.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesExpand(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesExpand.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesExpand, \
interaction_VerletListLennardJonesExpand, \
interaction_CellListLennardJonesExpand, \
interaction_FixedPairListLennardJonesExpand
class LennardJonesExpandLocal(PotentialLocal, interaction_LennardJonesExpand):
def __init__(self, epsilon=1.0, sigma=1.0, delta=0.0,
cutoff=infinity, shift="auto"):
"""Initialize the local LennardJonesExpand object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift == "auto":
cxxinit(self, interaction_LennardJonesExpand,
epsilon, sigma, delta, cutoff)
else:
cxxinit(self, interaction_LennardJonesExpand,
epsilon, sigma, delta, cutoff, shift)
class VerletListLennardJonesExpandLocal(InteractionLocal, interaction_VerletListLennardJonesExpand):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLennardJonesExpand, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class CellListLennardJonesExpandLocal(InteractionLocal, interaction_CellListLennardJonesExpand):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLennardJonesExpand, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesExpandLocal(InteractionLocal, interaction_FixedPairListLennardJonesExpand):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLennardJonesExpand, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
if pmi.isController:
class LennardJonesExpand(Potential):
'The LennardJonesExpand potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.LennardJonesExpandLocal',
pmiproperty = ['epsilon', 'sigma', 'delta']
)
class VerletListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListLennardJonesExpandLocal',
pmicall = ['setPotential','getPotential']
)
class CellListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListLennardJonesExpandLocal',
pmicall = ['setPotential']
)
class FixedPairListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListLennardJonesExpandLocal',
pmicall = ['setPotential']
)
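# Illustrative usage sketch (hypothetical, not part of the module): attach the
# potential to a Verlet-list interaction. `system` and `vl` are assumed to be
# an existing espressopp System and VerletList.
#
#     potLJE = espressopp.interaction.LennardJonesExpand(
#         epsilon=1.0, sigma=1.0, delta=0.5, cutoff=2.5)
#     interLJE = espressopp.interaction.VerletListLennardJonesExpand(vl)
#     interLJE.setPotential(type1=0, type2=0, potential=potLJE)
#     system.addInteraction(interLJE)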
|
capoe/espressopp.soap
|
src/interaction/LennardJonesExpand.py
|
Python
|
gpl-3.0
| 6,677
|
[
"ESPResSo"
] |
b5ce9557892680966b4e682afe19024de9a3ec615713b797659ef06c05bd4218
|
# -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses between two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
'''
'''
First, we import all necessary modules to extract, handle and plot
the connectivity matrices
'''
import numpy as np
import pylab
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
'''
We now specify a function which takes as arguments lists of neuron gids
corresponding to each population
'''
def plot_weight_matrices(E_neurons, I_neurons):
'''
Function to extract and plot weight matrices for all connections
among E_neurons and I_neurons
'''
'''
First, we initialize all the matrices, whose dimensionality is
determined by the number of elements in each population
Since this example has 2 populations (E/I), 2^2 = 4 possible
synaptic connection types exist (EE, EI, IE, II)
'''
W_EE = np.zeros([len(E_neurons), len(E_neurons)])
W_EI = np.zeros([len(I_neurons), len(E_neurons)])
W_IE = np.zeros([len(E_neurons), len(I_neurons)])
W_II = np.zeros([len(I_neurons), len(I_neurons)])
'''
Using `GetConnections`, we extract the information about all the
connections involving the populations of interest. `GetConnections`
returns a list of arrays (connection objects), one per connection.
Each array has the following elements:
[source-gid target-gid target-thread synapse-model-id port]
'''
a_EE = nest.GetConnections(E_neurons, E_neurons)
'''
Using `GetStatus`, we can extract the value of the connection weight,
for all the connections between these populations
'''
c_EE = nest.GetStatus(a_EE, keys='weight')
'''
Repeat the two previous steps for all other connection types
'''
a_EI = nest.GetConnections(I_neurons, E_neurons)
c_EI = nest.GetStatus(a_EI, keys='weight')
a_IE = nest.GetConnections(E_neurons, I_neurons)
c_IE = nest.GetStatus(a_IE, keys='weight')
a_II = nest.GetConnections(I_neurons, I_neurons)
c_II = nest.GetStatus(a_II, keys='weight')
'''
We now iterate through the list of all connections of each type.
To populate the corresponding weight matrix, we begin by identifying
the source-gid (first element of each connection object, n[0])
and the target-gid (second element of each connection object, n[1]).
For each gid, we subtract the minimum gid within the corresponding
population, to assure the matrix indices range from 0 to the size of
the population.
After determining the matrix indices [i, j], for each connection
object, the corresponding weight is added to the entry W[i,j].
The procedure is then repeated for all the different connection types.
'''
for idx, n in enumerate(a_EE):
W_EE[n[0] - min(E_neurons), n[1] - min(E_neurons)] += c_EE[idx]
for idx, n in enumerate(a_EI):
W_EI[n[0] - min(I_neurons), n[1] - min(E_neurons)] += c_EI[idx]
for idx, n in enumerate(a_IE):
W_IE[n[0] - min(E_neurons), n[1] - min(I_neurons)] += c_IE[idx]
for idx, n in enumerate(a_II):
W_II[n[0] - min(I_neurons), n[1] - min(I_neurons)] += c_II[idx]
'''
We can now specify the figure and axes properties. For this specific
example, we wish to display all the weight matrices in a single
figure, which requires us to use ``GridSpec`` (for example)
to specify the spatial arrangement of the axes.
A subplot is subsequently created for each connection type.
'''
fig = pylab.figure()
fig.suptitle('Weight matrices', fontsize=14)
gs = gridspec.GridSpec(4, 4)
ax1 = pylab.subplot(gs[:-1, :-1])
ax2 = pylab.subplot(gs[:-1, -1])
ax3 = pylab.subplot(gs[-1, :-1])
ax4 = pylab.subplot(gs[-1, -1])
'''
Using ``imshow``, we can visualize the weight matrix in the corresponding
axis. We can also specify the colormap for this image.
'''
plt1 = ax1.imshow(W_EE, cmap='jet')
'''
Using the ``axis_divider`` module from ``mpl_toolkits``, we can
allocate a small extra space on the right of the current axis,
which we reserve for a colorbar.
'''
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt1, cax=cax)
'''
We now set the title of each axis and adjust the axis subplot parameters
'''
ax1.set_title('W_{EE}')
pylab.tight_layout()
'''
Finally, the last three steps are repeated for each synapse type
'''
plt2 = ax2.imshow(W_IE)
plt2.set_cmap('jet')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt2, cax=cax)
ax2.set_title('W_{IE}')
pylab.tight_layout()
plt3 = ax3.imshow(W_EI)
plt3.set_cmap('jet')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt3, cax=cax)
ax3.set_title('W_{EI}')
pylab.tight_layout()
plt4 = ax4.imshow(W_II)
plt4.set_cmap('jet')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt4, cax=cax)
ax4.set_title('W_{II}')
pylab.tight_layout()
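'''
A minimal usage sketch (assuming a NEST 2.x kernel with the built-in
``iaf_psc_alpha`` model; population sizes and weights here are arbitrary
illustration values, not part of the original example):
'''
if __name__ == '__main__':
    E_neurons = nest.Create('iaf_psc_alpha', 80)
    I_neurons = nest.Create('iaf_psc_alpha', 20)
    nest.Connect(E_neurons, E_neurons, syn_spec={'weight': 1.0})
    nest.Connect(E_neurons, I_neurons, syn_spec={'weight': 1.0})
    nest.Connect(I_neurons, E_neurons, syn_spec={'weight': -2.0})
    nest.Connect(I_neurons, I_neurons, syn_spec={'weight': -2.0})
    plot_weight_matrices(E_neurons, I_neurons)
    pylab.show()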
|
mschmidt87/nest-simulator
|
pynest/examples/plot_weight_matrices.py
|
Python
|
gpl-2.0
| 6,243
|
[
"NEURON"
] |
938a73febfe0cb73e07889074a9d340f8e0b01ee27bbe4dafbbda3cc74b38577
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Nicolas Rougier - INRIA - CORTEX Project
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: CORTEX Project - INRIA
# INRIA Lorraine,
# Campus Scientifique, BP 239
# 54506 VANDOEUVRE-LES-NANCY CEDEX
# FRANCE
''' Numerical integration of dynamic neural fields.
This script implements the numerical integration of dynamic neural fields [1]_
of the form:
1 ∂U(x,t) ⌠+∞
- ------- = -U(x,t) + ⎮ w(|x-y|).f(U(y,t)).dy + I(x,t) + h
α ∂t ⌡-∞
where U(x,t) is the potential of a neural population at position x and time t
W(d) is a neighborhood function from ℝ⁺ → ℝ
f(u) is the firing rate of a single neuron from ℝ → ℝ
I(x,t) is the input at position x and time t
h is the resting potential
α is the temporal decay of the synapse
:References:
_[1] http://www.scholarpedia.org/article/Neural_fields
'''
import glumpy
import numpy as np
import scipy.linalg
def fromdistance(fn, shape, center=None, dtype=float):
'''Construct an array by executing a function over a normalized distance.
The resulting array therefore has a value
``fn(sqrt((x-x0)²+(y-y0)²))`` at coordinate ``(x,y)`` where x,y ∈ [-1,+1]²
Parameters
----------
fn : callable
The function is called with one parameter representing the normalized
distance. `fn` must be capable of operating on arrays, and should
return a scalar value.
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `fn`.
center : (N,) tuple of floats, optional
Normalized coordinates of the distance origin, each in [-1,+1].
Defaults to the origin.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `fn`. By default,
`dtype` is float.
'''
def distance(*args):
d = 0
for i in range(len(shape)):
d += (2*args[i]/float(shape[i]-1)-1 -center[i])**2
return np.sqrt(d) #/np.sqrt(len(shape))
if center is None:
center = [0.,]*len(shape)
return fn(np.fromfunction(distance,shape,dtype=dtype))
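# Illustrative sketch (not part of the original script): `fromdistance` with a
# difference-of-Gaussians profile, the same construction used below for the
# lateral interaction kernel K.
def _example_fromdistance():
    def dog(d):
        return np.exp(-(d/0.1)**2/2) - 0.5*np.exp(-(d/1.0)**2/2)
    K = fromdistance(dog, (41, 41))    # centered at the origin by default
    assert K.shape == (41, 41)
    assert K[20, 20] == K.max()        # strongest self-excitation at the center
    return K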
def convolve1d( Z, K ):
''' Discrete, clamped, linear convolution of two one-dimensional sequences.
:Parameters:
Z : (N,) array_like
First one-dimensional input array (input).
K : (M,) array_like
Second one-dimensional input array (kernel).
:Returns:
out : array
Discrete, clamped, linear convolution of `Z` and `K`.
'''
R = np.convolve(Z, K, 'same')
i0 = 0
if R.shape[0] > Z.shape[0]:
i0 = (R.shape[0]-Z.shape[0])/2 + 1 - Z.shape[0]%2
i1 = i0+ Z.shape[0]
return R[i0:i1]
def convolve2d(Z, K, USV = None):
''' Discrete, clamped convolution of two two-dimensional arrays.
:Parameters:
Z : (N1,N2) array_like
First two-dimensional input array (input)
K : (M1,M2) array_like
Second two-dimensional input array (kernel)
:Returns:
out : ndarray
Discrete, clamped, linear convolution of `Z` and `K`
'''
epsilon = 1e-9
if USV is None:
U,S,V = scipy.linalg.svd(K)
U,S,V = U.astype(K.dtype), S.astype(K.dtype), V.astype(K.dtype)
else:
U,S,V = USV
n = (S > epsilon).sum()
R = np.zeros( Z.shape )
for k in range(n):
Zt = Z.copy() * S[k]
for i in range(Zt.shape[0]):
Zt[i,:] = convolve1d(Zt[i,:], V[k,::-1])
for i in range(Zt.shape[1]):
Zt[:,i] = convolve1d(Zt[:,i], U[::-1,k])
R += Zt
return R
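# Illustrative sketch (not part of the original script): the kernel's SVD can
# be computed once and passed in, avoiding refactorization on every call;
# this is exactly how the simulation loop below reuses `USV`.
def _example_separable_convolution():
    def gaussian(d):
        return np.exp(-(d/0.1)**2/2)
    Z = np.random.random((40, 40))      # hypothetical field data
    K = fromdistance(gaussian, (81, 81))
    USV = scipy.linalg.svd(K)
    R1 = convolve2d(Z, K)               # factorizes K internally
    R2 = convolve2d(Z, K, USV)          # reuses the precomputed factorization
    assert np.allclose(R1, R2)
    return R2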
if __name__ == '__main__':
# Parameters
n = 40
dt = 0.05
alpha = 12.0
tau = 0.25
h = 0.0
s = (n*n)/(40.*40.)
noise = 0.01
theta = 0
dtheta = 0.025
rho = 0.75
n_stims = 3
u = (dt/tau)/alpha
def f(x):
return np.maximum(x,0)
def g(x, width=0.1):
return np.exp(-(x/width)**2/2)
def w(x):
return 1.0*g(x,0.1)-0.5*g(x,1.0)
def stimulus(shape,center,width):
def g2(x) : return g(x,width)
return fromdistance(g2,shape,center)
# Initialization
I = np.zeros((n,n),dtype=np.float32) # input
Z = np.zeros((n,n), dtype=np.float32) # output
Z_ = np.zeros((n,n), dtype=np.float32) # membrane potential
# Kernel
K = fromdistance(w,(2*n+1,2*n+1))
USV = scipy.linalg.svd(K)
# Output decoding
X,Y = np.mgrid[0:n,0:n]
X = 2*X/float(n-1) - 1
Y = 2*Y/float(n-1) - 1
window = glumpy.Window(2*512, 512)
Ii = glumpy.Image(I, interpolation='bicubic',
cmap=glumpy.colormap.Grey_r, vmin=0.0, vmax=2.5)
Zi = glumpy.Image(Z, interpolation='bicubic',
cmap=glumpy.colormap.Grey_r, vmin=0.0, vmax=0.25)
@window.event
def on_draw():
global Zi, Ii
window.clear()
Ii.blit(0,0,512,512)
Zi.blit(512,0,512,512)
@window.event
def on_key_press(key, modifiers):
global Z
if key == glumpy.key.SPACE:
Z[...] = 0
Z_[...] = 0
@window.event
def on_idle(*args):
global I, Z, Z_, Ii, Zi, dt, n, h, s, tau, alpha, theta, dtheta, rho, u
theta += dtheta
I[...] = np.zeros((n,n))
for j in range(n_stims):
t = theta+ j*2*np.pi/n_stims
x,y = rho*np.cos(t),rho*np.sin(t)
I += 2.5*stimulus((n,n), (x,y), 0.1)
I += (2*np.random.random((n,n))-1)*noise
# Compute field activity
for i in range(1):
L = convolve2d(Z,K,USV)/s
Z_ *= (1-tau)
L += I
L *= u
Z_ += L
#Z_[...] = np.minimum(np.maximum(Z_,0),1)
Z[...] = f(Z_)
Zi.update()
Ii.update()
window.draw()
window.mainloop()
|
davidcox/glumpy
|
experimental/cnft.py
|
Python
|
bsd-3-clause
| 6,621
|
[
"NEURON"
] |
b105d8270e5dc7b7f74fd68cbe4524c3489a20fd719467d57ac39548ea87f79b
|
def package_for_fah(process_only_these_targets=None, verbose=False, nclones=10, archive=True):
'''Create the input files and directory structure necessary to start a Folding@Home project.
MPI-enabled.
'''
import os
import Bio.SeqIO
import numpy
import mpi4py.MPI
import simtk.openmm as openmm
import simtk.unit as unit
comm = mpi4py.MPI.COMM_WORLD
rank = comm.rank
size = comm.size
targets_dir = os.path.abspath('targets')
templates_dir = os.path.abspath('templates')
models_dir = os.path.abspath('models')
packaged_models_dir = os.path.abspath('packaged-models')
projects_dir = os.path.join(packaged_models_dir, 'fah-projects')
if not os.path.exists(projects_dir):
os.mkdir(projects_dir)
targets_fasta_filename = os.path.join(targets_dir, 'targets.fa')
targets = Bio.SeqIO.parse(targets_fasta_filename, 'fasta')
templates_fasta_filename = os.path.join(templates_dir, 'templates.fa')
templates = list( Bio.SeqIO.parse(templates_fasta_filename, 'fasta') )
def generateRun(project_dir, source_dir, run, nclones, verbose=False):
"""
Build Folding@Home RUN and CLONE subdirectories from (possibly compressed) OpenMM serialized XML files.
ARGUMENTS
project_dir (string) - base project directory to place RUN in
source_dir (string) - source directory for OpenMM serialized XML files
run (int) - run index
nclones (int) - number of clones to generate
"""
if verbose: print "Building RUN %d" % run
try:
import os, shutil
import gzip
# Determine directory and pathnames.
rundir = os.path.join(project_dir, 'RUN%d' % run)
template_filename = os.path.join(rundir, 'template.txt')
seqid_filename = os.path.join(rundir, 'sequence-identity.txt')
system_filename = os.path.join(rundir, 'system.xml')
integrator_filename = os.path.join(rundir, 'integrator.xml')
protein_structure_filename = os.path.join(rundir, 'protein.pdb')
system_structure_filename = os.path.join(rundir, 'system.pdb')
protein_structure_filename_source = os.path.join(source_dir, 'implicit-refined.pdb')
system_structure_filename_source = os.path.join(source_dir, 'explicit-refined.pdb')
# Return if this directory has already been set up.
if os.path.exists(rundir):
if os.path.exists(template_filename) and os.path.exists(seqid_filename) and os.path.exists(system_filename) and os.path.exists(integrator_filename) and os.path.exists(protein_structure_filename) and os.path.exists(system_structure_filename): return
else:
# Construct run directory if it does not exist.
os.makedirs(rundir)
# Write template information.
[filepath, template_name] = os.path.split(source_dir)
with open(template_filename, 'w') as outfile:
outfile.write(template_name + '\n')
# Copy the protein and system structure pdbs
shutil.copyfile(protein_structure_filename_source, protein_structure_filename)
shutil.copyfile(system_structure_filename_source, system_structure_filename)
# Read system, integrator, and state.
def readFileContents(filename):
fullpath = os.path.join(source_dir, filename)
if os.path.exists(fullpath):
infile = open(fullpath, 'r')
elif os.path.exists(fullpath+'.gz'):
infile = gzip.open(fullpath+'.gz', 'r')
else:
raise IOError('File %s not found' % filename)
contents = infile.read()
infile.close()
return contents
def writeFileContents(filename, contents):
with open(filename, 'w') as outfile:
outfile.write(contents)
system = openmm.XmlSerializer.deserialize(readFileContents('explicit-system.xml'))
state = openmm.XmlSerializer.deserialize(readFileContents('explicit-state.xml'))
# Substitute default box vectors.
box_vectors = state.getPeriodicBoxVectors()
system.setDefaultPeriodicBoxVectors(*box_vectors)
# Write sequence identity.
contents = readFileContents(os.path.join(source_dir, 'sequence-identity.txt'))
writeFileContents(seqid_filename, contents)
# Integrator settings.
constraint_tolerance = 1.0e-5
timestep = 2.0 * unit.femtoseconds
collision_rate = 5.0 / unit.picosecond
temperature = 300.0 * unit.kelvin
# Create new integrator to use.
integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
# TODO: Make sure MonteCarloBarostat temperature matches set temperature.
# Serialize System.
writeFileContents(system_filename, openmm.XmlSerializer.serialize(system))
# Serialize Integrator
writeFileContents(integrator_filename, openmm.XmlSerializer.serialize(integrator))
# Create Context so we can randomize velocities.
platform = openmm.Platform.getPlatformByName('Reference')
context = openmm.Context(system, integrator, platform)
context.setPositions(state.getPositions())
context.setVelocities(state.getVelocities())
box_vectors = state.getPeriodicBoxVectors()
context.setPeriodicBoxVectors(*box_vectors)
# Create clones with different random initial velocities.
for clone_index in range(nclones):
context.setVelocitiesToTemperature(temperature)
state = context.getState(getPositions=True, getVelocities=True, getForces=True, getEnergy=True, getParameters=True, enforcePeriodicBox=True)
state_filename = os.path.join(rundir, 'state%d.xml' % clone_index)
writeFileContents(state_filename, openmm.XmlSerializer.serialize(state))
# Clean up.
del context, integrator, state, system
except Exception as e:
import traceback
print traceback.format_exc()
print str(e)
return
for target in targets:
# Process only specified targets if directed.
if process_only_these_targets and (target.id not in process_only_these_targets): continue
models_target_dir = os.path.join(models_dir, target.id)
if rank == 0:
if not os.path.exists(models_target_dir): continue
comm.Barrier()
if rank == 0:
print "-------------------------------------------------------------------------"
print "Building FAH OpenMM project for target %s" % target.id
print "-------------------------------------------------------------------------"
# ========
# Build a list of valid templates
# ========
# Process all templates.
if verbose: print "Building list of valid templates..."
valid_templates = list()
for template in templates:
# Check to make sure all files needed are present.
is_valid = True
filenames = ['explicit-system.xml', 'explicit-state.xml', 'explicit-integrator.xml']
for filename in filenames:
fullpath = os.path.join(models_target_dir, template.id, filename)
if not (os.path.exists(fullpath) or os.path.exists(fullpath+'.gz')):
is_valid = False
# Exclude those that are not unique by clustering.
unique_by_clustering = os.path.exists(os.path.join(models_target_dir, template.id, 'unique_by_clustering'))
if not unique_by_clustering:
is_valid = False
# TODO: Exclude if final potential energies from explicit solvent equilibration are too high.
# Append if valid.
if is_valid:
valid_templates.append(template)
nvalid = len(valid_templates)
if verbose: print "%d valid unique initial starting conditions found" % nvalid
# ========
# Sort by sequence identity
# ========
if verbose: print "Sorting templates in order of decreasing sequence identity..."
sequence_identities = numpy.zeros([nvalid], numpy.float32)
for (template_index, template) in enumerate(valid_templates):
filename = os.path.join(models_target_dir, template.id, 'sequence-identity.txt')
with open(filename, 'r') as infile:
contents = infile.readline().strip()
sequence_identity = float(contents)
sequence_identities[template_index] = sequence_identity
sorted_indices = numpy.argsort(-sequence_identities)
valid_templates = [ valid_templates[index] for index in sorted_indices ]
if verbose:
print "Sorted"
print sequence_identities[sorted_indices]
# ========
# Create project directory
# ========
project_dir = os.path.join(projects_dir, target.id)
if rank == 0:
if not os.path.exists(project_dir):
os.makedirs(project_dir)
comm.Barrier()
# ========
# Build runs in parallel
# ========
if verbose: print "Building RUNs in parallel..."
for run_index in range(rank, len(valid_templates), size):
print "-------------------------------------------------------------------------"
print "Building RUN for template %s" % valid_templates[run_index].id
print "-------------------------------------------------------------------------"
source_dir = os.path.join(models_target_dir, valid_templates[run_index].id)
generateRun(project_dir, source_dir, run_index, nclones, verbose)
# ========
# Archive
# ========
comm.Barrier()
if archive:
if rank == 0:
if verbose: print "Building compressed archive of results..."
import subprocess
archive_filename = os.path.join(projects_dir, target.id + '.tgz')
subprocess.call(['tar', 'zcf', archive_filename, project_dir])
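# Illustrative invocation sketch (hypothetical, not part of the original
# module): package_for_fah() is MPI-aware; each rank builds a disjoint
# subset of RUNs via the range(rank, len(valid_templates), size) loop,
# so it is normally launched under mpirun, e.g.:
#
#     mpirun -np 8 python -c "import packaging; packaging.package_for_fah(verbose=True)"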
def package_for_transfer(process_only_these_targets=None):
    raise NotImplementedError('Not implemented yet.')
|
danielparton/MSMSeeder
|
MSMSeeder/packaging.py
|
Python
|
gpl-2.0
| 10,699
|
[
"OpenMM"
] |
6b9a94c327da98b6048d18c86ed5d64f8f34b4c0effc4cd6a938e73686dd1797
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Transitions between Scenes"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import pyglet
from pyglet.gl import *
from cocos.actions import *
import cocos.scene as scene
from cocos.director import director
from cocos.layer import ColorLayer
from cocos.sprite import Sprite
__all__ = ['TransitionScene',
'RotoZoomTransition', 'JumpZoomTransition',
'MoveInLTransition', 'MoveInRTransition',
'MoveInBTransition', 'MoveInTTransition',
'SlideInLTransition', 'SlideInRTransition',
'SlideInBTransition', 'SlideInTTransition',
'FlipX3DTransition', 'FlipY3DTransition', 'FlipAngular3DTransition',
'ShuffleTransition',
'TurnOffTilesTransition',
'FadeTRTransition', 'FadeBLTransition',
'FadeUpTransition', 'FadeDownTransition',
'ShrinkGrowTransition',
'CornerMoveTransition',
'EnvelopeTransition',
'SplitRowsTransition', 'SplitColsTransition',
'FadeTransition',
'ZoomTransition', ]
class TransitionScene(scene.Scene):
"""TransitionScene
A Scene that takes two scenes and makes a transition between them.
The input scenes are put into envelopes (Scenes) that are made children
of the transition scene.
Proper transitions may modify any parameter of the envelopes, but must
not modify the input scenes directly; that would corrupt the input
scenes in the general case.
"""
def __init__(self, dst, duration=1.25, src=None):
"""Initializes the transition
:Parameters:
`dst` : Scene
Incoming scene, the one that remains visible when the transition ends.
`duration` : float
Duration of the transition in seconds. Default: 1.25
`src` : Scene
Outgoing scene. Default: current scene
"""
super(TransitionScene, self).__init__()
if src is None:
src = director.scene
# if the director is already running a transition scene then terminate
# it so we may move on
if isinstance(src, TransitionScene):
tmp = src.in_scene.get('dst')
src.finish()
src = tmp
if src is dst:
raise Exception("Incoming scene must be different from outgoing scene")
envelope = scene.Scene()
envelope.add(dst, name='dst')
self.in_scene = envelope #: envelope with scene that will replace the old one
envelope = scene.Scene()
envelope.add(src, name='src')
self.out_scene = envelope #: envelope with scene that will be replaced
self.duration = duration #: duration in seconds of the transition
if not self.duration:
self.duration = 1.25
self.start()
def start(self):
"""Adds the incoming scene with z=1 and the outgoing scene with z=0"""
self.add(self.in_scene, z=1)
self.add(self.out_scene, z=0)
def finish(self):
"""Called when the time is over.
Envelopes are discarded and the dst scene becomes the one run by the director.
"""
# devs:
# try to not override this method
# if you must, try to remain compatible with the recipe TransitionsWithPop
# if you can't, state in your class docstring that it is not usable for
# that recipe, and bonus points if you note in the recipe that your
# class is not eligible for pop transitions
dst = self.in_scene.get('dst')
src = self.out_scene.get('src')
director.replace(dst)
def hide_out_show_in(self):
"""Hides the outgoing scene and shows the incoming scene"""
self.in_scene.visible = True
self.out_scene.visible = False
def hide_all(self):
"""Hides both the incoming and outgoing scenes"""
self.in_scene.visible = False
self.out_scene.visible = False
def visit(self):
# preserve modelview matrix
glPushMatrix()
super(TransitionScene, self).visit()
glPopMatrix()
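# Illustrative usage sketch (not part of the original module): any
# TransitionScene subclass below is used by wrapping the incoming scene and
# handing the transition to the director; `next_scene` is hypothetical.
def _example_replace_with_transition(next_scene):
    """Switch the director to `next_scene`, fading through black for one second."""
    director.replace(FadeTransition(next_scene, duration=1.0))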
class RotoZoomTransition(TransitionScene):
"""Rotate and zoom out the outgoing scene, and then rotate and zoom in the incoming
"""
def __init__(self, *args, **kwargs):
super(RotoZoomTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1.0
self.in_scene.transform_anchor = (width // 2, height // 2)
self.out_scene.transform_anchor = (width // 2, height // 2)
rotozoom = (ScaleBy(0.001, duration=self.duration / 2.0) |
Rotate(360 * 2, duration=self.duration / 2.0)) + Delay(self.duration / 2.0)
self.out_scene.do(rotozoom)
self.in_scene.do(Reverse(rotozoom) + CallFunc(self.finish))
class JumpZoomTransition(TransitionScene):
"""Zoom out and jump the outgoing scene, and then jump and zoom in the incoming
"""
def __init__(self, *args, **kwargs):
super(JumpZoomTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.5
self.in_scene.position = (width, 0)
self.in_scene.transform_anchor = (width // 2, height // 2)
self.out_scene.transform_anchor = (width // 2, height // 2)
jump = JumpBy((-width, 0), width // 4, 2, duration=self.duration / 4.0)
scalein = ScaleTo(1, duration=self.duration / 4.0)
scaleout = ScaleTo(0.5, duration=self.duration / 4.0)
jumpzoomout = scaleout + jump
jumpzoomin = jump + scalein
delay = Delay(self.duration / 2.0)
self.out_scene.do(jumpzoomout)
self.in_scene.do(delay + jumpzoomin + CallFunc(self.finish))
class MoveInLTransition(TransitionScene):
"""Move in from to the left the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(MoveInLTransition, self).__init__(*args, **kwargs)
self.init()
a = self.get_action()
self.in_scene.do((Accelerate(a, 0.5)) + CallFunc(self.finish))
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (-width, 0)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInRTransition(MoveInLTransition):
"""Move in from to the right the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (width, 0)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInTTransition(MoveInLTransition):
"""Move in from to the top the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (0, height)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInBTransition(MoveInLTransition):
"""Move in from to the bottom the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (0, -height)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class SlideInLTransition(TransitionScene):
"""Slide in the incoming scene from the left border.
"""
def __init__(self, *args, **kwargs):
super(SlideInLTransition, self).__init__(*args, **kwargs)
self.width, self.height = director.get_window_size()
self.init()
move = self.get_action()
self.in_scene.do(Accelerate(move, 0.5))
self.out_scene.do(Accelerate(move, 0.5) + CallFunc(self.finish))
def init(self):
self.in_scene.position = (-self.width, 0)
def get_action(self):
return MoveBy((self.width, 0), duration=self.duration)
class SlideInRTransition(SlideInLTransition):
"""Slide in the incoming scene from the right border.
"""
def init(self):
self.in_scene.position = (self.width, 0)
def get_action(self):
return MoveBy((-self.width, 0), duration=self.duration)
class SlideInTTransition(SlideInLTransition):
"""Slide in the incoming scene from the top border.
"""
def init(self):
self.in_scene.position = (0, self.height)
def get_action(self):
return MoveBy((0, -self.height), duration=self.duration)
class SlideInBTransition(SlideInLTransition):
"""Slide in the incoming scene from the bottom border.
"""
def init(self):
self.in_scene.position = (0, -self.height)
def get_action(self):
return MoveBy((0, self.height), duration=self.duration)
class FlipX3DTransition(TransitionScene):
"""Flips the screen horizontally.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipX3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=0, delta_z=90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=0, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class FlipY3DTransition(TransitionScene):
"""Flips the screen vertically.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipY3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=90, delta_z=-90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=90, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class FlipAngular3DTransition(TransitionScene):
"""Flips the screen half horizontally and half vertically.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipAngular3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=45, delta_z=90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=45, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class ShuffleTransition(TransitionScene):
"""Shuffle the outgoing scene, and then reorder the tiles with the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(ShuffleTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12*aspect), 12
shuffle = ShuffleTiles(grid=(x, y), duration=self.duration / 2.0, seed=15)
self.in_scene.visible = False
self.do(shuffle +
CallFunc(self.hide_out_show_in) +
Reverse(shuffle) +
CallFunc(self.finish) +
StopGrid())
class ShrinkGrowTransition(TransitionScene):
"""Shrink the outgoing scene while grow the incoming scene
"""
def __init__(self, *args, **kwargs):
super(ShrinkGrowTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1
self.in_scene.transform_anchor = (2*width / 3.0, height / 2.0)
self.out_scene.transform_anchor = (width / 3.0, height / 2.0)
scale_out = ScaleTo(0.01, duration=self.duration)
scale_in = ScaleTo(1.0, duration=self.duration)
self.in_scene.do(Accelerate(scale_in, 0.5))
self.out_scene.do(Accelerate(scale_out, 0.5) + CallFunc(self.finish))
class CornerMoveTransition(TransitionScene):
"""Moves the bottom-right corner of the incoming scene to the top-left corner
"""
def __init__(self, *args, **kwargs):
super(CornerMoveTransition, self).__init__(*args, **kwargs)
self.out_scene.do(MoveCornerUp(duration=self.duration) +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
class EnvelopeTransition(TransitionScene):
"""From the outgoing scene:
- moves the top-right corner to the center
- moves the bottom-left corner to the center
From the incoming scene:
- performs the reverse action of the outgoing scene
"""
def __init__(self, *args, **kwargs):
super(EnvelopeTransition, self).__init__(*args, **kwargs)
self.in_scene.visible = False
move = QuadMoveBy(delta0=(320, 240), delta1=(-630, 0),
delta2=(-320, -240), delta3=(630, 0),
duration=self.duration / 2.0)
# move = Accelerate(move)
self.do(move +
CallFunc(self.hide_out_show_in) +
Reverse(move) +
CallFunc(self.finish) +
StopGrid())
class FadeTRTransition(TransitionScene):
"""Fade the tiles of the outgoing scene from the left-bottom corner the to top-right corner.
"""
def __init__(self, *args, **kwargs):
super(FadeTRTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12 * aspect), 12
a = self.get_action(x, y)
# a = Accelerate(a)
self.out_scene.do(a +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
def get_action(self, x, y):
return FadeOutTRTiles(grid=(x, y), duration=self.duration)
class FadeBLTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the top-right corner to the bottom-left corner.
"""
def get_action(self, x, y):
return FadeOutBLTiles(grid=(x, y), duration=self.duration)
class FadeUpTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the bottom to the top.
"""
def get_action(self, x, y):
return FadeOutUpTiles(grid=(x, y), duration=self.duration)
class FadeDownTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the top to the bottom.
"""
def get_action(self, x, y):
return FadeOutDownTiles(grid=(x, y), duration=self.duration)
class TurnOffTilesTransition(TransitionScene):
"""Turn off the tiles of the outgoing scene in random order
"""
def __init__(self, *args, **kwargs):
super(TurnOffTilesTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12 * aspect), 12
a = TurnOffTiles(grid=(x, y), duration=self.duration)
# a = Accelerate(a)
self.out_scene.do(a +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
class FadeTransition(TransitionScene):
"""Fade out the outgoing scene and then fade in the incoming scene.
Optionally supply the color to fade to in-between as an RGB color tuple.
"""
def __init__(self, *args, **kwargs):
color = kwargs.pop('color', (0, 0, 0)) + (0,)
super(FadeTransition, self).__init__(*args, **kwargs)
self.fadelayer = ColorLayer(*color)
self.in_scene.visible = False
self.add(self.fadelayer, z=2)
def on_enter(self):
super(FadeTransition, self).on_enter()
self.fadelayer.do(FadeIn(duration=self.duration / 2.0) +
CallFunc(self.hide_out_show_in) +
FadeOut(duration=self.duration / 2.0) +
CallFunc(self.finish))
def on_exit(self):
super(FadeTransition, self).on_exit()
self.remove(self.fadelayer)
class SplitColsTransition(TransitionScene):
"""Splits the screen in columns.
The odd columns go upwards while the even columns go downwards.
"""
def __init__(self, *args, **kwargs):
super(SplitColsTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.visible = False
flip_a = self.get_action()
flip = flip_a + \
CallFunc(self.hide_out_show_in) + \
Reverse(flip_a)
self.do(AccelDeccel(flip) +
CallFunc(self.finish) +
StopGrid())
def get_action(self):
return SplitCols(cols=3, duration=self.duration / 2.0)
class SplitRowsTransition(SplitColsTransition):
"""Splits the screen in rows.
The odd rows go to the left while the even rows go to the right.
"""
def get_action(self):
return SplitRows(rows=3, duration=self.duration / 2.0)
class ZoomTransition(TransitionScene):
"""Zoom and FadeOut the outgoing scene."""
def __init__(self, *args, **kwargs):
if 'src' in kwargs or len(args) == 3:
raise Exception("ZoomTransition does not accept 'src' parameter.")
super(ZoomTransition, self).__init__(*args, **kwargs)
# fixme: if the scene was never run and some drawable needs to initialize
# on enter, the next line will render badly
self.out_scene.visit()
def start(self):
screensprite = self._create_out_screenshot()
zoom = ScaleBy(2, self.duration) | FadeOut(self.duration)
restore = CallFunc(self.finish)
screensprite.do(zoom + restore)
self.add(screensprite, z=1)
self.add(self.in_scene, z=0)
def finish(self):
# tested with the recipe TransitionsWithPop, works.
dst = self.in_scene.get('dst')
director.replace(dst)
def _create_out_screenshot(self):
# TODO: try to use `pyglet.image.get_buffer_manager().get_color_buffer()`
# instead of creating a new BufferManager... note that pyglet uses a
# BufferManager singleton that fails when you change the window size.
buffer = pyglet.image.BufferManager()
image = buffer.get_color_buffer()
width, height = director.window.width, director.window.height
actual_width, actual_height = director.get_window_size()
out = Sprite(image)
out.position = actual_width // 2, actual_height // 2
out.scale = max(actual_width / width, actual_height / height)
return out
|
vyscond/cocos
|
cocos/scenes/transitions.py
|
Python
|
bsd-3-clause
| 21,876
|
[
"VisIt"
] |
90553079466ff6b46fec29c0785e2b949df83ac14da4a39bb01858feb23f86f9
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from copy import deepcopy
from cStringIO import StringIO
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.db import transaction
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
from student.models import CourseEnrollment
from instructor.views.api import _split_input_list
from instructor.views.gradebook_api import get_grade_book_page
from instructor.enrollment import (
enroll_email,
unenroll_email,
get_email_params,
)
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
from lms.djangoapps.ccx.utils import (
assign_coach_role_to_ccx,
ccx_course,
ccx_students_enrolling_center,
get_ccx_for_coach,
get_date,
parse_date,
prep_course_for_grading,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
View decorator which enforces that the user has the CCX coach role on the
given course, and translates the course_id from the Django route into a
course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
course_key = ccx.course_id
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(
_('You must be a CCX Coach to access this view.'))
course = get_course_by_id(course_key, depth=None)
# if there is a ccx, we must validate that it is the ccx for this coach
if ccx is not None:
coach_ccx = get_ccx_for_coach(course, request.user)
if coach_ccx is None or coach_ccx.id != ccx.id:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
# so, if no ccx is passed in, we can safely redirect to that
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# At this point we are done with verification that current user is ccx coach.
assign_coach_role_to_ccx(ccx_locator, request.user, course.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Enforce a static limit for the maximum amount of students that can be enrolled
override_field_for_ccx(ccx, course, 'max_student_enrollments_allowed', settings.CCX_MAX_STUDENTS_ALLOWED)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, ccx.id)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
# Enroll the coach in the course
email_params = get_email_params(course, auto_enroll=True, course_key=ccx_id, display_name=ccx.display_name)
enroll_email(
course_id=ccx_id,
student_email=request.user.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
assign_coach_role_to_ccx(ccx_id, request.user, course.id)
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
# Only subsections (aka sequentials) and units (aka verticals) have due dates.
due = None
if 'due' in unit:  # checking that the key ('due') exists in the dict (unit).
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
else:
# In case of section aka chapter we do not have due date.
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
# For a vertical, override start and due dates of all its problems.
if unit.get('category', None) == u'vertical':
for component in block.get_children():
# override start and due date of problem (Copy dates of vertical into problems)
if start:
override_field_for_ccx(ccx, component, 'start', start)
if due:
override_field_for_ccx(ccx, component, 'due', due)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
earliest, ccx_ids_to_delete = override_fields(course, json.loads(request.body), graded, ccx_ids_to_delete=[])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section.get('min_count', 0):
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
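# Example request body for save_ccx (a minimal sketch inferred from
# override_fields above; the location strings and dates are hypothetical,
# and parse_date determines the accepted date format):
#
#   [
#       {
#           "location": "block-v1:Org+Course+Run+type@chapter+block@week1",
#           "category": "chapter",
#           "hidden": false,
#           "start": "2015-01-01 00:00",
#           "children": [
#               {
#                   "location": "block-v1:Org+Course+Run+type@sequential+block@sub1",
#                   "category": "sequential",
#                   "hidden": false,
#                   "start": "2015-01-01 00:00",
#                   "due": "2015-01-15 00:00"
#               }
#           ]
#       }
#   ]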
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to strings to get them ready for use by the js date
widgets, which use text inputs.
Visits only student-visible nodes; children of hidden nodes are
skipped as well.
Dates:
Only the start date is applicable to a section. If the ccx coach did not
override the start date, it is taken from the master course.
Both start and due dates are applicable to a subsection (aka sequential).
If the ccx coach did not override these dates, they are taken from the
corresponding subsection in the master course.
A unit inherits its start and due dates from its subsection. If the ccx
coach did not override these dates, they are taken from the corresponding
subsection in the master course.
"""
for child in node.get_children():
# in case the children are visible to staff only, skip them
if child.visible_to_staff_only:
continue
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
start = get_date(ccx, child, 'start')
if depth > 1:
# A subsection has both start and due dates; units inherit dates from their subsection
if depth == 2:
due = get_date(ccx, child, 'due')
elif depth == 3:
# Fall back to the subsection's start and due dates in case the unit has no overridden dates.
due = get_date(ccx, child, 'due', node)
start = get_date(ccx, child, 'start', node)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
else:
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
Return a JSON representation of the CCX schedule.
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
email_students = 'email-students' in request.POST
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""
Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
email_students = 'email-students' in request.POST
identifiers = [student_id]
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
errors = ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params)
for error_message in errors:
messages.error(request, error_message)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
student_info, page = get_grade_book_page(request, course, course_key=ccx_key)
return render_to_response('courseware/gradebook.html', {
'page': page,
'page_url': reverse('ccx_gradebook', kwargs={'course_id': ccx_key}),
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
response = HttpResponse(buf.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment'
return response
| franosincic/edx-platform | lms/djangoapps/ccx/views.py | Python | agpl-3.0 | 19,713 | ["VisIt"] | 987d81a763f6c1d46fafb20cba6ec575067fa3403e1bc50bb6870b561167564e |
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.dom import minidom, XMLNS_NAMESPACE, Node
from zeroinstall import SafeException
from zeroinstall.injector.namespaces import XMLNS_IFACE
from zeroinstall.injector import model
from . import namespace, formatting
ns = namespace.Namespace()
class DuplicateIDException(SafeException):
pass
def childNodes(parent, namespaceURI = None, localName = None):
for x in parent.childNodes:
if x.nodeType != Node.ELEMENT_NODE: continue
if namespaceURI is not None and x.namespaceURI != namespaceURI: continue
if localName is None or x.localName == localName:
yield x
requires_names = frozenset(['requires', 'restricts'] + list(model.binding_names))
class Context:
def __init__(self, impl):
self.attribs = {} # (ns, localName) -> value
self.requires = [] # Actually, requires, restricts and bindings
self.commands = {} # (name, version-expr) -> <command>
node = impl
while True:
for name, value in node.attributes.itemsNS():
if name[0] == XMLNS_NAMESPACE:
ns.register_namespace(value, name[1])
elif name not in self.attribs:
self.attribs[name] = value
for x in childNodes(node, XMLNS_IFACE):
if x.localName in requires_names:
self.requires.append(x)
elif x.localName == 'command':
command_name = (x.getAttribute('name'), x.getAttribute('if-0install-version'))
if command_name not in self.commands:
self.commands[command_name] = x
# (else the existing definition on the child should be used)
node = node.parentNode
if node.nodeName != 'group':
break
@property
def has_main_and_run(self):
"""Checks whether we have a main and a <command name='run'>.
This case requires special care."""
for name, expr in self.commands:
if name == 'run':
break
else:
return False # No run command
return (None, 'main') in self.attribs
def find_impls(parent):
"""Return all <implementation> children, including those inside groups."""
for x in childNodes(parent, XMLNS_IFACE):
if x.localName == 'implementation':
yield x
elif x.localName == 'group':
for y in find_impls(x):
yield y
def find_groups(parent):
"""Return all <group> children, including those inside other groups."""
for x in childNodes(parent, XMLNS_IFACE, 'group'):
yield x
for y in find_groups(x):
yield y
def _iter_child_nodes_skipping_ws(elem):
text_so_far = ""
for node in elem.childNodes:
if node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
text_so_far += node.data.strip()
else:
if text_so_far:
yield elem.ownerDocument.createTextNode(text_so_far)
text_so_far = ""
yield node
if text_so_far:
yield elem.ownerDocument.createTextNode(text_so_far)
yield None
def _compare_children(a, b):
"""@rtype: bool"""
ai = _iter_child_nodes_skipping_ws(a)
bi = _iter_child_nodes_skipping_ws(b)
while True:
ae = next(ai)
be = next(bi)
if ae is None and be is None:
return True
if ae is None or be is None:
return False
if not nodes_equal(ae, be):
return False
return True
def nodes_equal(a, b):
"""Compare two DOM nodes.
Warning: only supports documents containing elements, comments, text
nodes and attributes (will crash on processing instructions, etc).
Strips whitespace from text nodes (except the initial a, b nodes).
@rtype: bool"""
if a.nodeType != b.nodeType:
return False
if a.nodeType == Node.ELEMENT_NODE:
if a.namespaceURI != b.namespaceURI:
return False
if a.nodeName != b.nodeName:
return False
a_attrs = set([(name, value) for name, value in a.attributes.itemsNS()])
b_attrs = set([(name, value) for name, value in b.attributes.itemsNS()])
if a_attrs != b_attrs:
#print "%s != %s" % (a_attrs, b_attrs)
return False
return _compare_children(a, b)
elif a.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return a.data == b.data
elif a.nodeType == Node.DOCUMENT_NODE:
return _compare_children(a, b)
elif a.nodeType == Node.COMMENT_NODE:
return a.nodeValue == b.nodeValue
else:
assert 0, ("Unknown node type", a)
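# Example (a minimal sketch): whitespace around text nodes is stripped,
# so these two documents compare equal.
#
#   doc_a = minidom.parseString('<a x="1"> hi </a>')
#   doc_b = minidom.parseString('<a x="1">hi</a>')
#   assert nodes_equal(doc_a, doc_b)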
def score_subset(group, impl):
"""Returns (is_subset, goodness)"""
for key in group.attribs:
if key not in impl.attribs:
#print "BAD", key
return (0,) # Group sets an attribute the impl doesn't want
matching_commands = 0
for name_expr, g_command in group.commands.items():
if name_expr not in impl.commands:
return (0,) # Group sets a command the impl doesn't want
if nodes_equal(g_command, impl.commands[name_expr]):
# Prefer matching commands to overriding them
matching_commands += 1
for g_req in group.requires:
for i_req in impl.requires:
if nodes_equal(g_req, i_req): break
else:
return (0,) # Group adds a requires that the impl doesn't want
# Score result so we get groups that have all the same requires/commands first, then ones with all the same attribs
return (1, len(group.requires) + len(group.commands), len(group.attribs) + matching_commands)
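# Score tuples compare lexicographically, e.g. (1, 2, 0) > (1, 1, 5), so a
# group sharing more requires/commands always beats one that merely shares
# more attributes, and any subset match (first element 1) beats a non-match.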
# Note: the namespace stuff isn't quite right yet.
# Might get conflicts if both documents use the same prefix for different things.
def merge(master_doc, local_doc):
known_ids = set()
def check_unique(elem):
impl_id = elem.getAttribute("id")
if impl_id in known_ids:
raise DuplicateIDException("Duplicate ID " + impl_id)
known_ids.add(impl_id)
for impl in find_impls(master_doc.documentElement):
check_unique(impl)
# Merge each implementation in the local feed in turn (normally there will only be one)
for impl in find_impls(local_doc.documentElement):
check_unique(impl)
# 1. Get the context of the implementation to add. This is:
# - The set of its requirements
# - The set of its commands
# - Its attributes
new_impl_context = Context(impl)
# 2. For each <group> in the master feed, see if it provides a compatible context:
# - A subset of the new implementation's requirements
# - A subset of the new implementation's command names
# - A subset of the new implementation's attributes (names, not values)
# Choose the most compatible <group> (the root counts as a minimally compatible group)
best_group = ((1, 0, 0), master_doc.documentElement) # (score, element)
for group in find_groups(master_doc.documentElement):
group_context = Context(group)
score = score_subset(group_context, new_impl_context)
if score > best_group[0]:
best_group = (score, group)
group = best_group[1]
group_context = Context(group)
if new_impl_context.has_main_and_run:
# If the existing group doesn't have the same main value then we'll need a new group. Otherwise,
# we're likely to override the command by having main on the implementation element.
current_group_main = group_context.attribs.get((None, 'main'), None)
need_new_group_for_main = current_group_main != new_impl_context.attribs[(None, 'main')]
else:
need_new_group_for_main = False
new_commands = []
for name_expr, new_command in new_impl_context.commands.items():
if need_new_group_for_main and name_expr[0] == 'run':
# If we're creating a new <group main='...'> then we can't inherit an existing <command name='run'/>,
old_command = None
else:
old_command = group_context.commands.get(name_expr, None)
if not (old_command and nodes_equal(old_command, new_command)):
new_commands.append(ns.import_node(master_doc, new_command))
# If we have additional requirements or commands, we'll need to create a subgroup and add them
if len(new_impl_context.requires) > len(group_context.requires) or new_commands or need_new_group_for_main:
subgroup = group.ownerDocument.createElementNS(XMLNS_IFACE, 'group')
group.appendChild(subgroup)
group = subgroup
#group_context = Context(group)
for x in new_impl_context.requires:
for y in group_context.requires:
if nodes_equal(x, y): break
else:
req = ns.import_node(master_doc, x)
#print "Add", req
group.appendChild(req)
for c in new_commands:
group.appendChild(c)
if need_new_group_for_main:
group.setAttribute('main', new_impl_context.attribs[(None, 'main')])
# We'll remove it from the <implementation> below, when cleaning up duplicates
group_context = Context(group)
new_impl = ns.import_node(master_doc, impl)
# Already copied to parent <group>
for x in list(childNodes(new_impl, XMLNS_IFACE)):
if x.localName in ['command'] + list(requires_names):
new_impl.removeChild(x)
# Attributes might have been set on a parent group; move to the impl
for name in new_impl_context.attribs:
#print "Set", name, value
ns.add_attribute_ns(new_impl, name[0], name[1], new_impl_context.attribs[name])
for name, value in new_impl.attributes.itemsNS():
if name[0] == XMLNS_NAMESPACE or \
(name in group_context.attribs and group_context.attribs[name] == value):
#print "Deleting duplicate attribute", name, value
new_impl.removeAttributeNS(name[0], name[1])
group.appendChild(new_impl)
def merge_files(master_feed_url, master_feed, new_impls_feed):
"""Add each implementation in new_impls_feed to master_feed.
Return the new doc"""
with open(master_feed, 'rb') as stream:
master_doc = minidom.parse(stream)
with open(new_impls_feed, 'rb') as stream:
new_impls_doc = minidom.parse(stream)
merge(master_doc, new_impls_doc)
return master_doc
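# Example (a minimal sketch; the feed paths are hypothetical):
#
#   doc = merge_files('http://example.com/feed.xml', 'master.xml', 'new-impls.xml')
#   with open('master.xml', 'wb') as stream:
#       stream.write(doc.toxml(encoding='utf-8'))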
| 0install/0repo | repo/merge.py | Python | lgpl-2.1 | 9,427 | ["VisIt"] | f8299923f0226521b1e998d30505ec79a6f9dce40c859e7e1245f3b67c15e416 |
#! /usr/bin/env python
"""
Usage:
bam-coverage.py reference.fa query.x.reference.bam minmatch mapq
bam-coverage calculates the fraction of bases in 'reference.fa' that are
covered by sequences in a BAM alignment file that are longer than 'minmatch'.
Original script: https://github.com/ngs-docs/ngs-scripts/blob/master/blast/calc-blast-cover.py
"""
from __future__ import print_function, division
import sys
DEPENDENCIES = {
"pysam": False,
"screed": False
}
try:
import pysam
DEPENDENCIES['pysam'] = True
except ImportError:
pass
try:
import screed
DEPENDENCIES['screed'] = True
except ImportError:
pass
def check_dependencies():
if all(DEPENDENCIES[d] for d in DEPENDENCIES):
return True
print("Missing dependencies, install before proceeding:", file=sys.stderr)
for dep, installed in DEPENDENCIES.items():
if not installed:
print(dep, file=sys.stderr)
sys.exit(1)
def bam_coverage(reference, alignments, min_match, min_mapq=30, min_len=0):
check_dependencies()
# create empty lists representing the total number of bases in the
# reference
print("creating empty lists", file=sys.stderr)
covs = {}
for record in screed.open(reference):
covs[record.name] = [0] * len(record.sequence)
# run through the BAM records in the query, and calculate how much of
# the reference is covered by the query.
print('building coverage', file=sys.stderr)
with pysam.Samfile(alignments, "rb") as samfile:
for record in samfile:
if len(record.query) < min_match:
continue
if record.mapq < min_mapq:
continue
cov = covs.get(samfile.getrname(record.tid))
if not cov:
continue
if min_len and len(record.aligned_pairs) < min_len * len(record.seq):
continue
for pos_read, pos_ref in record.aligned_pairs:
if pos_ref is not None:  # position 0 is valid; skip only unaligned (None) reference positions
cov[pos_ref] = 1
# print out summary statistics for each of the reference.
coved = {}
sizes = {}
total = 0
covered = 0
print('Summing stats', file=sys.stderr)
for name in covs:
coved[name] = sum(covs[name])
sizes[name] = float(len(covs[name]))
covered += coved[name]
total += sizes[name]
fraction = covered / float(total or 1)
print('total bases in reference:', total)
print('total ref bases covered :', covered)
print('fraction :', fraction)
print('reference :', reference)
print('BAM alignment file :', alignments)
return {
'total': total,
'covered': covered,
'fraction': fraction,
'coverage per contig': coved,
'contig size': sizes
}
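# Example (a minimal sketch; the file names are hypothetical):
#
#   stats = bam_coverage("ref.fa", "reads.x.ref.bam", min_match=100, min_mapq=30)
#   print(stats['fraction'])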
if __name__ == "__main__":
reference = sys.argv[1]
alignments = sys.argv[2]
min_match = int(sys.argv[3])
min_mapq = int(sys.argv[4])
bam_coverage(reference, alignments, min_match, min_mapq=min_mapq)
| luizirber/bioinfo | bioinfo/bam_coverage.py | Python | bsd-3-clause | 3,081 | ["BLAST", "pysam"] | b4c612b98f9cd7fb3e74bb5ba8a2ef56a581eca1edeb8dcbe649e0f3b5c62afb |
from toplevel import *
import pdb
from pylab import *
import argparse,glob,os,os.path
from scipy.ndimage import filters,interpolation,morphology,measurements
from scipy import stats
from scipy.misc import imsave
import common,sl,morph
def B(a):
if a.dtype==dtype('B'): return a
return array(a,'B')
class record:
def __init__(self,**kw): self.__dict__.update(kw)
def blackout_images(image,ticlass):
"""Takes a page image and a ticlass text/image classification image and replaces
all regions tagged as 'image' with rectangles in the page image. The page image
is modified in place. All images are iulib arrays."""
rgb = ocropy.intarray()
ticlass.textImageProbabilities(rgb,image)
r = ocropy.bytearray()
g = ocropy.bytearray()
b = ocropy.bytearray()
ocropy.unpack_rgb(r,g,b,rgb)
components = ocropy.intarray()
components.copy(g)
n = ocropy.label_components(components)
print "[note] number of image regions",n
tirects = ocropy.rectarray()
ocropy.bounding_boxes(tirects,components)
for i in range(1,tirects.length()):
r = tirects.at(i)
ocropy.fill_rect(image,r,0)
r.pad_by(-5,-5)
ocropy.fill_rect(image,r,255)
def binary_objects(binary):
labels,n = morph.label(binary)
objects = morph.find_objects(labels)
return objects
def estimate_scale(binary):
objects = binary_objects(binary)
bysize = sorted(objects,key=sl.area)
scalemap = zeros(binary.shape)
for o in bysize:
if amax(scalemap[o])>0: continue
scalemap[o] = sl.area(o)**0.5
scale = median(scalemap[(scalemap>3)&(scalemap<100)])
return scale
def compute_boxmap(binary,scale,threshold=(.5,4),dtype='i'):
objects = binary_objects(binary)
bysize = sorted(objects,key=sl.area)
boxmap = zeros(binary.shape,dtype)
for o in bysize:
if sl.area(o)**.5<threshold[0]*scale: continue
if sl.area(o)**.5>threshold[1]*scale: continue
boxmap[o] = 1
return boxmap
def compute_lines(segmentation,scale):
"""Given a line segmentation map, computes a list
of tuples consisting of 2D slices and masked images."""
lobjects = morph.find_objects(segmentation)
lines = []
for i,o in enumerate(lobjects):
if o is None: continue
if sl.dim1(o)<2*scale or sl.dim0(o)<scale: continue
mask = (segmentation[o]==i+1)
if amax(mask)==0: continue
result = record()
result.label = i+1
result.bounds = o
result.mask = mask
lines.append(result)
return lines
def pad_image(image,d,cval=inf):
result = ones(array(image.shape)+2*d)
result[:,:] = amax(image) if cval==inf else cval
result[d:-d,d:-d] = image
return result
@checks(ARANK(2),int,int,int,int,mode=str,cval=True,_=GRAYSCALE)
def extract(image,y0,x0,y1,x1,mode='nearest',cval=0):
h,w = image.shape
ch,cw = y1-y0,x1-x0
y,x = clip(y0,0,h-ch),clip(x0,0,w-cw)
sub = image[y:y+ch,x:x+cw]
# print "extract",image.dtype,image.shape
try:
return interpolation.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
except RuntimeError:
# workaround for platform differences between 32bit and 64bit
# scipy.ndimage
dtype = sub.dtype
sub = array(sub,dtype='float64')
sub = interpolation.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
sub = array(sub,dtype=dtype)
return sub
@checks(ARANK(2),True,pad=int,expand=int,_=GRAYSCALE)
def extract_masked(image,linedesc,pad=5,expand=0):
"""Extract a subimage from the image using the line descriptor.
A line descriptor consists of bounds and a mask."""
y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \
linedesc.bounds[0].stop,linedesc.bounds[1].stop]]
if pad>0:
mask = pad_image(linedesc.mask,pad,cval=0)
else:
mask = linedesc.mask
line = extract(image,y0-pad,x0-pad,y1+pad,x1+pad)
if expand>0:
mask = filters.maximum_filter(mask,(expand,expand))
line = where(mask,line,amax(line))
return line
def reading_order(lines,highlight=None,debug=0):
"""Given the list of lines (a list of 2D slices), computes
the partial reading order. The output is a binary 2D array
such that order[i,j] is true if line i comes before line j
in reading order."""
order = zeros((len(lines),len(lines)),'B')
def x_overlaps(u,v):
return u[1].start<v[1].stop and u[1].stop>v[1].start
def above(u,v):
return u[0].start<v[0].start
def left_of(u,v):
return u[1].stop<v[1].start
def separates(w,u,v):
if w[0].stop<min(u[0].start,v[0].start): return 0
if w[0].start>max(u[0].stop,v[0].stop): return 0
if w[1].start<u[1].stop and w[1].stop>v[1].start: return 1
return 0
if highlight is not None:
clf(); title("highlight"); imshow(binary); ginput(1,debug) # NOTE: 'binary' is assumed to exist in the caller's global scope
for i,u in enumerate(lines):
for j,v in enumerate(lines):
if x_overlaps(u,v):
if above(u,v):
order[i,j] = 1
else:
if [w for w in lines if separates(w,u,v)]==[]:
if left_of(u,v): order[i,j] = 1
if j==highlight and order[i,j]:
print (i,j),
y0,x0 = sl.center(lines[i])
y1,x1 = sl.center(lines[j])
plot([x0,x1+200],[y0,y1])
if highlight is not None:
print
ginput(1,debug)
return order
def topsort(order):
"""Given a binary array defining a partial order (o[i,j]==True means i<j),
compute a topological sort. This is a quick and dirty implementation
that works for up to a few thousand elements."""
n = len(order)
visited = zeros(n)
L = []
def visit(k):
if visited[k]: return
visited[k] = 1
for l in find(order[:,k]):
visit(l)
L.append(k)
for k in range(n):
visit(k)
return L #[::-1]
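# Example (a minimal sketch): combine reading_order and topsort to turn the
# partial order into a single reading sequence over line records.
#
#   lines = compute_lines(segmentation, scale)
#   order = reading_order([l.bounds for l in lines])
#   lsort = topsort(order)
#   ordered_lines = [lines[i] for i in lsort]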
def show_lines(image,lines,lsort):
"""Overlays the computed lines on top of the image, for debugging
purposes."""
ys,xs = [],[]
clf(); cla()
imshow(image)
for i in range(len(lines)):
l = lines[lsort[i]]
y,x = sl.center(l.bounds)
xs.append(x)
ys.append(y)
o = l.bounds
r = matplotlib.patches.Rectangle((o[1].start,o[0].start),edgecolor='r',fill=0,width=sl.dim1(o),height=sl.dim0(o))
gca().add_patch(r)
h,w = image.shape
ylim(h,0); xlim(0,w)
plot(xs,ys)
@obsolete
def read_gray(fname):
image = imread(fname)
if image.ndim==3: image = mean(image,2)
return image
@obsolete
def read_binary(fname):
image = imread(fname)
if image.ndim==3: image = mean(image,2)
image -= amin(image)
image /= amax(image)
assert sum(image<0.01)+sum(image>0.99)>0.99*prod(image.shape),"input image is not binary"
binary = 1.0*(image<0.5)
return binary
@obsolete
def rgbshow(r,g,b=None,gn=1,cn=0,ab=0,**kw):
"""Small function to display 2 or 3 images as RGB channels."""
if b is None: b = zeros(r.shape)
combo = transpose(array([r,g,b]),axes=[1,2,0])
if cn:
for i in range(3):
combo[:,:,i] /= max(abs(amin(combo[:,:,i])),abs(amax(combo[:,:,i])))
elif gn:
combo /= max(abs(amin(combo)),abs(amax(combo)))
if ab:
combo = abs(combo)
if amin(combo)<0: print "warning: values less than zero"
imshow(clip(combo,0,1),**kw)
| brobertson/ocropus-bgr | ocropy/ocrolib/psegutils.py | Python | apache-2.0 | 7,511 | ["VisIt"] | 28d2b2ef5e1770d88a97c5f37815f6eb3dcddd4968bd95faa7f3cec7f8796ff1 |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from scipy import loadtxt
import os
#write for loop to iterate over each peak? Apply gaussian filter to find peaks and then just fit each one?
#adjust count rate for duration of sweep also
def choose_domain(xdata, ydata, domain, upper = None):
if upper is not None:
locs = np.where(ydata >= upper)[0]
for index in locs:
ydata[index] = upper
lower_bound = np.where(xdata == domain[0])[0]
upper_bound = np.where(xdata == domain[1])[0]
return xdata[lower_bound:upper_bound], ydata[lower_bound:upper_bound]
def main(dataset, show = False):
with open(dataset, 'r') as f:
f.seek(270)
time = float(f.read(6))
xdata, ydata = loadtxt(dataset, unpack=True, usecols=[0,1], skiprows=25)
sel_xdata_1, sel_ydata_1 = choose_domain(xdata, ydata, [215, 307])
sel_xdata_2, sel_ydata_2 = choose_domain(xdata, ydata, [610, 700])
#sel_xdata_3, sel_ydata_3 = choose_domain(xdata, ydata, [610, 760])
error = np.sqrt(ydata)
err1 = np.ones_like(sel_ydata_1) #ask about these tomorrow
err2 = np.ones_like(sel_ydata_2)
#err3 = np.ones_like(sel_ydata_3)
for i in range(0, len(sel_ydata_1)):
if sel_ydata_1[i] == 0:
err1[i] = 1
else:
err1[i] = np.sqrt(sel_ydata_1[i])
for i in range(0, len(sel_ydata_2)):
if sel_ydata_2[i] == 0:
err2[i] = 1
else:
err2[i] = np.sqrt(sel_ydata_2[i])
#for i in range(0, len(sel_ydata_3)):
# if sel_ydata_3[i] == 0:
# err3[i] = 1
# else:
# err3[i] = np.sqrt(sel_ydata_3[i])
def gaussian(x, *p):
return p[0]/(p[1]*np.sqrt(2*np.pi)) * np.exp(-(x-p[2])**2/(2*p[1]**2)) + p[3] + p[4]*x
def red_gaussian(x, *p):
return p[0]/(p[1]*np.sqrt(2*np.pi)) * np.exp(-(x-p[2])**2/(2*p[1]**2)) + p[3]
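# Model parameters: p = [N, sigma, mu, C(, B)], where N is the total counts
# under the peak, sigma the width, mu the centroid, C a constant background
# and B a linear background slope; N divided by the live time gives the
# count rates reported below.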
p1 = [48810, 9, 266, 433, -1]
p2 = [9303, 14, 650, 2]
#p3 = [400**2, 19, 675, 0, 0]
popt1, pcov1 = curve_fit(gaussian, sel_xdata_1, sel_ydata_1, p0 = p1, sigma = err1)
popt2, pcov2 = curve_fit(red_gaussian, sel_xdata_2, sel_ydata_2, p0 = p2, sigma = err2)
#popt3, pcov3 = curve_fit(gaussian, sel_xdata_3, sel_ydata_3, p0 = p3, sigma = err3)
yFit_1 = gaussian(sel_xdata_1, *popt1)
yuFit_1 = gaussian(sel_xdata_1, *p1)
yFit_2 = red_gaussian(sel_xdata_2, *popt2)
yuFit_2 = red_gaussian(sel_xdata_2, *p2)
#yFit_3 = gaussian(sel_xdata_3, *popt3)
#yuFit_3 = gaussian(sel_xdata_3, *p3)
chisq1 = np.sum(((yFit_1-sel_ydata_1)/err1)**2)
chisq2 = np.sum(((yFit_2-sel_ydata_2)/err2)**2)
#chisq3 = np.sum(((yFit_3-sel_ydata_3)/err3)**2)
plt.figure(figsize = (13, 10))
plt.errorbar(xdata, ydata, error, fmt = 'o', label = "Raw Data")
plt.plot(sel_xdata_1, yFit_1,
linewidth = 2, alpha = .9, label = "511 keV Fit")
plt.plot(sel_xdata_2, yFit_2, linewidth = 2, alpha = .9, label = "1270 keV Fit")
#plt.plot(sel_xdata_3, yFit_3, linewidth = 2, alpha = .9, label = "356 keV Fit")
plt.text(500, 700, r"$f_1(x) = \frac{N_1}{\sigma_1\sqrt{2\pi}}e^{\frac{-(x-\mu_1)^2}{2\sigma_1^2}} + C + Bx$"\
"\n"\
r"$N_1 = %.0f \pm %.1g \, counts$"\
"\n"\
r"$\sigma_1 = %.2f \pm %.1g$"\
"\n"\
r"$\mu_1 = %.2f \pm %.1g$"\
"\n"\
r"$C = %.0f \pm %.1g \, counts$"\
"\n"\
r"$B = %.0f \pm %.1g \, counts/channel$"\
"\n"\
r"$\chi^2 = %f $"\
"\n"\
r"$\frac{\chi^2}{\nu} = %f $"\
"\n"\
r"$\nu = %d$"\
% (popt1[0], np.sqrt(pcov1[0,0]), popt1[1], np.sqrt(pcov1[1,1]), popt1[2], np.sqrt(pcov1[2,2]),
popt1[3], np.sqrt(pcov1[3,3]), popt1[4], np.sqrt(pcov1[4,4]), chisq1, chisq1/(len(sel_xdata_1) - len(p1)), len(sel_xdata_1) - len(p1)))
plt.text(800, 700, r"$f_2(x) = \frac{N_2}{\sigma_2\sqrt{2\pi}}e^{\frac{-(x-\mu_2)^2}{2\sigma_2^2}} + C$"\
"\n"\
r"$N_2 = %.0f \pm %.1g \, counts$"\
"\n"\
r"$\sigma_2 = %.1f \pm %.1g$"\
"\n"\
r"$\mu_2 = %.1f \pm %.1g$"\
"\n"\
r"$C = %.0f \pm %.1g \, counts$"\
"\n"\
r"$\chi^2 = %f $"\
"\n"\
r"$\frac{\chi^2}{\nu} = %f $"\
"\n"\
r"$\nu = %d$"\
% (popt2[0], np.sqrt(pcov2[0,0]), popt2[1], np.sqrt(pcov2[1,1]), popt2[2], np.sqrt(pcov2[2,2]),
popt2[3], np.sqrt(pcov2[3,3]), chisq2, chisq2/(len(sel_xdata_2) - len(p2)), len(sel_xdata_2) - len(p2)))
#plt.text(850, 600, r"$f_3(x) = \frac{N_3}{\sigma_3\sqrt{2\pi}}e^{\frac{-(x-\mu_3)^2}{2\sigma_3^2}} + C + Bx$"\
# "\n"\
# r"$N_3 = %.0f \pm %.1g \, counts$"\
# "\n"\
# r"$\sigma_3 = %.1f \pm %.1g$"\
# "\n"\
# r"$\mu_3 = %.1f \pm %.1g$"\
# "\n"\
# r"$C = %.0f \pm %.1g \, counts$"\
# "\n"\
# r"$B = %.0f \pm %.1g \, counts/channel$"\
# "\n"\
# r"$\chi^2 = %f $"\
# "\n"\
# r"$\frac{\chi^2}{\nu} = %f $"\
# "\n"\
# r"$\nu = %d$"\
# % (popt3[0], np.sqrt(pcov3[0,0]), popt3[1], np.sqrt(pcov3[1,1]), popt3[2], np.sqrt(pcov3[2,2]),
# popt3[3], np.sqrt(pcov3[3,3]), popt3[4], np.sqrt(pcov3[4,4]), chisq3, chisq3/(len(sel_xdata_3) - len(p3)), len(sel_xdata_3) - len(p3)))
plt.text(823, 350, r"$\frac{N_1}{t} = %f \, counts/sec$"\
"\n"\
r"$\frac{N_2}{t} = %f \, counts/sec$"\
# "\n"\
#r"$\frac{N_3}{t} = %f \, counts/sec$"\
% (popt1[0]/time, popt2[0]/time))
#dist = dataset[2:dataset.find("m")]
#with open("countrates.tsv", "a+") as f:
# f.write(str(dist)+"\t"+str(popt1[0]/time)+"\t"+str(popt2[0]/time)+"\t"+str(popt3[0]/time)+"\t"+
# str(np.sqrt(pcov1[0,0])/popt1[0])+"\t"+str(np.sqrt(pcov2[0,0])/popt2[0])+"\t"\
# +str(np.sqrt(pcov3[0,0])/popt3[0])+"\n")
#f.close()
plt.xlabel("Channel")
plt.ylabel("Counts")
plt.title("PHA Spectrum of Na-22 Energy Decay ~ %s" % dataset[2:dataset.find('.')])
plt.legend(loc = 4)
plt.savefig("/users/aman/desktop/phys211/gamma cross sections/plots/%s.pdf" % dataset[0:dataset.find('.')])
if show:
plt.show()
if __name__ == '__main__':
os.chdir("/users/aman/desktop/phys211/gamma cross sections/data/Na_22/run_2")
#with open("countrates.tsv", "w+") as f:
# f.write("Thickness in mm"+"\t"+"Countrate N1"+"\t"+"Countrate N2"+"\t"+"Countrate N3"\
# +"\t"+"% Error in N1"+"\t"+"% Error in N2"+"\t"+"% Error in N3"+"\n")
#f.close()
#for i in range(0, 60):
try:
main("Na%dmmAl.tsv" % 8, show = True)
except IOError:
pass
| bzcheeseman/phys211 | gamma cross sections/gamma_cross_sections.py | Python | lgpl-3.0 | 6,230 | ["Gaussian"] | 7c581f5ba5fc9c55f8076de6863dfde9e6dcd37f0b409ac925d88a1808d1e4fd |
import time
import smtplib
import imaplib
import sys
import traceback
from splinter import Browser
#Gmail account to listen for panic mail on
mail_user = ""
mail_pass = ""
#Panic subject
panic_subject = "panic"
#panic phrase to listen for in body of email
"""Please Change this"""
panic_phrase = "panic"
#Array of accounts credentials.
##Soon to be generated automatically from an encrypted file
#Just hang in there for that functionality.
creds = {}
##Example creds entry
# creds['google'] = ['mygmail@gmail.com','secretgmailpassword']
# You can add multiple accounts to each service type by building a list
# within a list in the same format as above
# Example is
# creds['google'] = [['mygmail@gmail.com','pass'],['secondgmail@gmail.com','pass2']]
##
creds['google'] = None
creds['facebook'] = None
creds['twitter'] = None
#New password to set all accounts to
"""Please Change this"""
#Or don't, I'm not your boss
#Needs to be complex enough for Facebook to accept it.
new_password = "Emergency!"
#Number of seconds to wait between checks for panic in email
WAIT_SECONDS = 10
#Number of attempts to make
TRIES = 1
#Lag permit for page loads in seconds
INV=0.5
def poll_for(value, b, attempts=5):
btn = None
for i in range(attempts):
try:
btn = b.find_by_css(value).first
except:
btn = None
if btn is not None:
break
time.sleep(INV)
return btn
def poll_fill(name, value, b, attempts=5):
for i in range(attempts):
try:
b.fill(name, value)
return
except:
time.sleep(INV)
continue
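# Example (a minimal sketch; the URL and selector are hypothetical):
#
#   b = Browser()
#   b.visit("https://example.com/login")
#   btn = poll_for("#login-button", b, attempts=10)
#   if btn is not None:
#       btn.click()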
class mail_listener:
user = ""
passwd = ""
panic = ""
def __init__(self, user, passwd, panic):
self.user = user
self.passwd = passwd
self.panic = panic
def read(self):
mail = imaplib.IMAP4_SSL('imap.gmail.com')
mail.login(self.user, self.passwd)
mail.list()
mail.select("inbox")
result, data = mail.search(None, "(SUBJECT \"%s\")" % panic_subject)
ids = data[0]
id_list = ids.split()
try:
latest_email_id = id_list[-1]
except:
return None
result, data = mail.fetch(latest_email_id, "(RFC822)")
raw_email = data[0][1]
if self.panic in raw_email:
return raw_email
return None
class google_account:
login = ""
panic = ""
user = ""
attempts = 0
max_attempts = 5
def __init__(self,user, login, panic):
self.user = user
self.login = login
self.panic = panic
def passwd(self):
if len(self.login) < 1 or len(self.panic) < 1 or len(self.user) < 1:
return False
b = Browser()
b.driver.set_window_size(900,900)
try:
b.visit("https://accounts.google.com/ServiceLogin?service=accountsettings")
b.fill('Email',self.user)
btn = b.find_by_id("next")
btn.click()
b.fill('Passwd',self.login)
btn = poll_for("#signIn", b)
btn.click()
b.visit("https://myaccount.google.com/security#signin")
btn = b.find_by_css(".vkq40d").first
if btn is not None:
print "not none"
btn.click()
poll_fill('Email',self.user, b)
btn = b.find_by_id("next")
btn.click()
poll_fill('Passwd',self.login, b)
btn = b.find_by_id("signIn")
btn.click()
time.sleep(INV)
btn = poll_for(".TCRTM", b)
btn.click()
poll_fill('Passwd',self.login, b)
btn = b.find_by_id("signIn")
btn.click()
p = poll_for(".Hj", b)
p.fill(self.panic)
p = b.find_by_css(".Hj")[1]
p.fill(self.panic)
btn = b.find_by_css(".Ya")
btn.click()
time.sleep(INV*5)
b.quit()
except:
traceback.print_exc(file=sys.stdout)
raw_input("Something went wrong...")
b.quit()
if self.attempts < self.max_attempts:
self.attempts += 1
self.passwd()
class facebook_account:
user = ""
login = ""
panic = ""
def __init__(self, user, login, panic):
self.user = user
self.login = login
self.panic = panic
def passwd(self):
b = Browser()
b.driver.set_window_size(900,900)
try:
b.visit("https://www.facebook.com")
b.fill("email",self.user)
b.fill("pass",self.login)
btn = b.find_by_value("Log In")
btn.click()
b.visit("https://www.facebook.com/settings")
btn = b.find_by_id("u_0_7")
btn.click()
b.fill("password_old", self.login)
b.fill("password_new", self.panic)
b.fill("password_confirm", self.panic)
btn = b.find_by_value("Save Changes")
btn.click()
b.quit()
except:
b.quit()
class twitter_account:
user = ""
login = ""
panic = ""
def __init__(self, user, login, panic):
self.user = user
self.login = login
self.panic = panic
def passwd(self):
b = Browser()
b.driver.set_window_size(900,900)
try:
b.visit("https://twitter.com")
btn = b.find_by_css(".js-login")
btn.click()
b.find_by_name("session[username_or_email]").fill(self.user)
b.find_by_name("session[password]").fill(self.login)
btn = b.find_by_value("Log in")
btn.click()
b.visit("https://twitter.com/settings/password")
b.fill("current_password", self.login)
b.fill("user_password", self.panic)
b.fill("user_password_confirmation", self.panic)
btn = b.find_by_text("Save changes")
btn.click()
b.quit()
except:
b.quit()
#Not Yet Implemented
class microsoft_account:
#Currently having some weird issues with this.
#Microsoft is weird about access to this app
user = ""
login = ""
panic = ""
def __init__(self, user, login, panic):
raise NotImplementedError("Microsoft Account, Module Not Yet Implemented")
self.user = user
self.login = login
self.panic = panic
def passwd(self):
if len(self.login) < 1 or len(self.panic) < 1 or len(self.user) < 1:
return False
b = Browser()
b.visit("https://login.live.com")
#e = b.find_by_id("idDiv_PWD_UsernameExample")
b.fill("loginfmt",self.user)
b.fill("passwd",self.login)
b.driver.set_window_size(900,900)
btn = b.find_by_value("Sign in")
btn.mouse_over()
btn.double_click()
b.visit("https://account.live.com/password/change?mkt=en-US")
b.quit()
def trigger():
if creds['twitter'] is not None:
if type(creds['twitter'][0]) == str:
t = twitter_account(creds['twitter'][0],creds['twitter'][1],new_password)
t.passwd()
elif type(creds['twitter'][0]) == list:
for acc in creds['twitter']:
t = twitter_account(acc[0],acc[1],new_password)
t.passwd()
if creds['facebook'] is not None:
if type(creds['facebook'][0]) == str:
f = facebook_account(creds['facebook'][0],creds['facebook'][1],new_password)
f.passwd()
elif type(creds['facebook'][0]) == list:
for acc in creds['facebook']:
f = facebook_account(acc[0],acc[1],new_password)
f.passwd()
if creds['google'] is not None:
if type(creds['google'][0]) == str:
g = google_account(creds['google'][0],creds['google'][1],new_password)
g.passwd()
elif type(creds['google'][0]) == list:
for acc in creds['google']:
g = google_account(acc[0],acc[1],new_password)
g.passwd()
if __name__ == "__main__":
while True:
l = mail_listener(mail_user, mail_pass, panic_phrase)
parse = l.read()
### Right here is where I plan to implement cryptographic storage for creds
##stay tuned. That will all be coming shortly. But I have to be up at six am.
# - Ben
if parse is not None:
trigger()
TRIES -= 1
if TRIES < 1:
sys.exit(0)
time.sleep(WAIT_SECONDS)
| PrometheanInfoSec/Lockdown | lockdown.py | Python | gpl-3.0 | 7,704 | ["VisIt"] | f5ebe331b035c379a2dd65ddfb9b0da375d290225d8c53d239b31c7d5e325a70 |
from simphony.api import CUDS
from simphony.core import CUBA
from simphony.cuds.meta.api import Material
from simphony.cuds.particles import Particle, Particles
from .lammps_data_file_parser import LammpsDataFileParser
from .lammps_data_file_writer import LammpsDataFileWriter
from .lammps_data_line_interpreter import LammpsDataLineInterpreter
from .lammps_simple_data_handler import LammpsSimpleDataHandler
from ..common.atom_style import (AtomStyle, get_atom_style)
from ..common.atom_style_description import ATOM_STYLE_DESCRIPTIONS
from ..common.utils import create_material_to_atom_type_map
from ..config.domain import get_box
def read_data_file(filename, atom_style=None, name=None):
""" Reads LAMMPS data file and create CUDS objects
Reads LAMMPS data file and create a Particles and CUDS. The CUDS
will contain a material for each atom type (e.g. CUBA.MATERIAL_TYPE).
The attributes for each particle are based upon what atom-style
the file contains (i.e. "sphere" means that particles in addition to having
CUBA.VELOCITY will also have a CUBA.RADIUS and CUBA.MASS). See
'atom_style' for more details.
Parameters
----------
filename : str
filename of lammps data file
atom_style : AtomStyle, optional
type of atoms in the file. If None, then an attempt of
interpreting the atom-style in the file is performed.
name : str, optional
name to be given to returned Particles. If None, then filename is
used.
Returns
-------
particles : Particles
particles
SD : CUDS
SD containing materials
"""
handler = LammpsSimpleDataHandler()
parser = LammpsDataFileParser(handler=handler)
parser.parse(filename)
if atom_style is None:
atom_style = (
get_atom_style(handler.get_atom_type())
if handler.get_atom_type()
else AtomStyle.ATOMIC)
types = (atom_t for atom_t in
range(1, handler.get_number_atom_types() + 1))
atoms = handler.get_atoms()
velocities = handler.get_velocities()
masses = handler.get_masses()
box_origin = handler.get_box_origin()
box_vectors = handler.get_box_vectors()
type_to_material_map = {}
statedata = CUDS()
# set up a Material for each different type
for atom_type in types:
material = Material()
description = "Material for lammps atom type (originally '{}')".format(
atom_type
)
material.description = description
type_to_material_map[atom_type] = material.uid
statedata.add([material])
# add masses to materials
for atom_type, mass in masses.iteritems():
material = statedata.get(type_to_material_map[atom_type])
material.data[CUBA.MASS] = mass
statedata.update([material])
def convert_atom_type_to_material(atom_type):
return type_to_material_map[atom_type]
interpreter = LammpsDataLineInterpreter(atom_style,
convert_atom_type_to_material)
# create particles
particles = Particles(name=name if name else filename)
data = particles.data
data.update({CUBA.ORIGIN: box_origin,
CUBA.VECTOR: box_vectors})
particles.data = data
# add each particle
for lammps_id, values in atoms.iteritems():
coordinates, data = interpreter.convert_atom_values(values)
data.update(interpreter.convert_velocity_values(velocities[lammps_id]))
p = Particle(coordinates=coordinates, data=data)
particles.add([p])
return particles, statedata
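# Example (a minimal sketch; 'data.lammps' is a hypothetical filename):
#
#   particles, sd = read_data_file('data.lammps')
#   for particle in particles.iter(item_type=CUBA.PARTICLE):
#       print(particle.coordinates)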
def write_data_file(filename,
particles,
state_data,
atom_style=AtomStyle.ATOMIC):
""" Writes LAMMPS data file from CUDS objects
Writes LAMMPS data file from a list of Particles.
The particles will be annotated with their Simphony-uid. For example::
10 1 17 -1.0 10.0 5.0 6.0 # uid:'40fb302c-6e71-11e5-b35f-08606e7c2200' # noqa
Parameters
----------
filename : str
filename of lammps data file
particles : Particles or iterable of Particles
particles
state_data : CUDS
SD containing materials
atom_style : AtomStyle, optional
type of atoms to be written to file
"""
if type(particles) is not list:
particles = [particles]
num_particles = sum(
pc.count_of(CUBA.PARTICLE) for pc in particles)
# get a mapping from material_type to atom_type
material_to_atom_type = create_material_to_atom_type_map(state_data)
box = get_box([pc.data for pc in particles])
material_type_to_mass = None if not _style_has_masses(
atom_style) else _get_mass(state_data)
writer = LammpsDataFileWriter(filename,
atom_style=atom_style,
number_atoms=num_particles,
material_to_atom_type=material_to_atom_type,
simulation_box=box,
material_type_to_mass=material_type_to_mass)
for pc in particles:
for p in pc.iter(item_type=CUBA.PARTICLE):
writer.write_atom(p)
writer.close()
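# Example (a minimal sketch; the filenames are hypothetical): round-trip a
# data file through CUDS objects.
#
#   particles, sd = read_data_file('in.data')
#   write_data_file('out.data', particles, sd, atom_style=AtomStyle.ATOMIC)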
def _style_has_masses(atom_style):
""" Returns if atom style has masses
"""
return ATOM_STYLE_DESCRIPTIONS[atom_style].has_mass_per_type
def _get_mass(state_data):
""" Get a dictionary from 'material type' to 'mass'.
Parameters:
-----------
state_data : CUDS
SD containing material with mass
"""
material_type_to_mass = {}
for material in state_data.iter(item_type=CUBA.MATERIAL):
try:
material_type_to_mass[material.uid] = material.data[CUBA.MASS]
except KeyError:
raise RuntimeError(
"CUBA.MASS is missing from material '{}'".format(
material.uid))
return material_type_to_mass
| simphony/simphony-lammps-md | simlammps/io/file_utility.py | Python | bsd-2-clause | 6,019 | ["LAMMPS"] | 6ff75dd758b37c36ea58ce8b8fe197d4e6a5ba217e681557a8d421635ed3ea84 |
"""PyVista-like ITKwidgets plotter."""
import numpy as np
import pyvista as pv
class PlotterITK():
"""ITKwidgets plotter.
Used for plotting interactively within a jupyter notebook.
Requires ``itkwidgets>=0.25.2``. For installation see:
https://github.com/InsightSoftwareConsortium/itkwidgets#installation
Examples
--------
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> pl = pyvista.PlotterITK() # doctest:+SKIP
>>> pl.add_mesh(mesh, color='w') # doctest:+SKIP
>>> pl.background_color = 'k' # doctest:+SKIP
>>> pl.show() # doctest:+SKIP
"""
def __init__(self, **kwargs):
"""Initialize the itkwidgets plotter."""
try:
import itkwidgets
except ImportError: # pragma: no cover
raise ImportError("Please install `itkwidgets>=0.25.2`")
from itkwidgets import __version__
from scooby import meets_version
if not meets_version(__version__, "0.25.2"): # pragma: no cover
raise ImportError("Please install `itkwidgets>=0.25.2`")
self._point_sets = []
self._geometries = []
self._geometry_colors = []
self._geometry_opacities = []
self._point_set_colors = []
self._background_color = None
self._camera_position = None
self._point_set_sizes = []
self._point_set_representations = []
def add_points(self, points, color=None, point_size=3.0):
"""Add points to plotter.
Parameters
----------
points : np.ndarray or pyvista.DataSet
n x 3 numpy array of points or pyvista dataset with points.
color : string or 3 item list, optional
Color of points (if visible). Either a string, rgb list,
or hex color string. For example:
``color='white'``
``color='w'``
``color=[1, 1, 1]``
``color='#FFFFFF'``
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``3.0``
Examples
--------
Add 10 random points to the plotter
>>> pl.add_points(np.random.random((10, 3)), 'r', 10) # doctest:+SKIP
"""
if pv.is_pyvista_dataset(points):
point_array = points.points
else:
point_array = points
# style : str, optional
# How to represent the point set. One of ``'hidden'``,
# ``'points'``, or ``'spheres'``.
# if style not in ['hidden', 'points', 'spheres']:
# raise ValueError("``style`` must be either 'hidden', 'points', or"
# "'spheres'")
if not isinstance(point_size, (int, float)):
raise TypeError('``point_size`` parameter must be a float')
self._point_set_sizes.append(point_size)
self._point_set_colors.append(pv.parse_color(color))
self._point_sets.append(point_array)
# self._point_set_representations.append(style)
def add_mesh(self, mesh, color=None, scalars=None,
opacity=1.0, smooth_shading=False):
"""Add a PyVista/VTK mesh or dataset.
Adds any PyVista/VTK mesh that itkwidgets can wrap to the
scene.
Parameters
----------
mesh : pyvista.DataSet or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
points.
color : string or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
opacity : float, optional
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. Default 1.0
smooth_shading : bool, optional
Smooth mesh surface mesh by taking into account surface
normals. Surface will appear smoother while sharp edges
will still look sharp. Default False.
"""
if not pv.is_pyvista_dataset(mesh):
mesh = pv.wrap(mesh)
# smooth shading requires point normals to be freshly computed
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pv.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
mesh.compute_normals(cell_normals=False, inplace=True)
elif 'Normals' in mesh.point_arrays:
# if 'normals' in mesh.point_arrays:
mesh.point_arrays.pop('Normals')
# make the scalars active
if isinstance(scalars, str):
if scalars in mesh.point_arrays or scalars in mesh.cell_arrays:
array = mesh[scalars].copy()
else:
raise ValueError(f'Scalars ({scalars}) not in mesh')
mesh[scalars] = array
mesh.active_scalars_name = scalars
elif isinstance(scalars, np.ndarray):
array = scalars
scalar_name = '_scalars'
mesh[scalar_name] = array
mesh.active_scalars_name = scalar_name
elif color is not None:
mesh.active_scalars_name = None
# itkwidgets does not support VTK_ID_TYPE
if 'vtkOriginalPointIds' in mesh.point_arrays:
mesh.point_arrays.pop('vtkOriginalPointIds')
if 'vtkOriginalCellIds' in mesh.cell_arrays:
mesh.cell_arrays.pop('vtkOriginalCellIds')
from itkwidgets._transform_types import to_geometry
mesh = to_geometry(mesh)
self._geometries.append(mesh)
self._geometry_colors.append(pv.parse_color(color))
self._geometry_opacities.append(opacity)
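# Example (a minimal sketch): color a sphere by the z coordinate of its
# points, with smooth shading.
#
#   sphere = pv.Sphere()
#   pl = PlotterITK()
#   pl.add_mesh(sphere, scalars=sphere.points[:, 2], smooth_shading=True)
#   pl.show()  # doctest:+SKIP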
@property
def background_color(self):
"""Return the background color of the plotter."""
return self._background_color
@background_color.setter
def background_color(self, color):
"""Set the background color of the plotter.
Examples
--------
Set the background color to black
>>> plotter.background_color = 'k'
"""
self._background_color = pv.parse_color(color)
@property
def camera_position(self):
"""Return camera position of the plotter as a list."""
if self._camera_position is not None:
return self._camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the plotter."""
if isinstance(camera_location, str):
raise ValueError('String camera positions are not supported in PlotterITK')
else:
# check if a valid camera position
if not len(camera_location) == 3:
raise pv.core.errors.InvalidCameraError
elif any([len(item) != 3 for item in camera_location]):
raise pv.core.errors.InvalidCameraError
self._camera_position = camera_location
def show(self, ui_collapsed=True, rotate=False, show_bounds=False,
**kwargs):
"""Show itkwidgets plotter in cell output.
Parameters
----------
ui_collapsed : bool, optional
Plot with the user interface collapsed. UI can be enabled
when plotting. Default ``True``.
rotate : bool, optional
Rotate the camera around the scene. Default ``False``.
Appears to be computationally intensive.
show_bounds : bool, optional
Show the bounding box. Default False
point_size : int, optional
Size of the points displayed in the scene.
Returns
-------
viewer : itkwidgets.Viewer
``ITKwidgets`` viewer.
"""
if self._background_color is not None:
kwargs.setdefault('background', self._background_color)
if self._camera_position is not None:
kwargs.setdefault('camera', self._camera_position)
from itkwidgets import Viewer
viewer = Viewer(geometries=self._geometries,
geometry_colors=self._geometry_colors,
geometry_opacities=self._geometry_opacities,
point_set_colors=self._point_set_colors,
point_sets=self._point_sets,
point_set_sizes=self._point_set_sizes,
point_set_representations=self._point_set_representations,
ui_collapsed=ui_collapsed,
rotate=rotate,
axes=show_bounds,
**kwargs)
# always show if iPython is installed
try:
from IPython import display
display.display_html(viewer)
except ImportError: # pragma: no cover
pass
return viewer
| akaszynski/vtkInterface | pyvista/jupyter/itkplotter.py | Python | mit | 9,875 | ["VTK"] | 8c8bfde62b7d0edc335dcc5ba4cc31473aff7ad0a3116dceb173cf8cac30483c |
#Author:Chaitanya CH
#FileName: updatepaintGL.py
#This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
import sys
from PyQt4 import QtCore, QtGui
from PyGLWidget import PyGLWidget
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from oglfunc.objects import *
from oglfunc.group import *
from numpy import arange,digitize
import moose
import pickle
mc=moose.context
class updatepaintGL(PyGLWidget):
def paintGL(self):
PyGLWidget.paintGL(self)
self.render()
def setSelectionMode(self,mode):
self.selectionMode = mode
def render(self):
if (self.defaultPosVal==0):
self.translate([0.5,-0.5,-30.0])
self.rotate([1.0, 0.0, 0.0],-7.0)
self.rotate([0.0, 1.0, 0.0],-7.0)
self.xpan = 0.5
self.ypan = -0.5
self.zpan = -30.0
self.defaultPosVal = 1
if self.lights:
glMatrixMode(GL_MODELVIEW)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
light0_pos = 200.0, 200.0, 300.0, 0
diffuse0 = 1.0, 1.0, 1.0, 1.0
specular0 = 1.0, 1.0, 1.0, 1.0
ambient0 = 0, 0, 0, 1
glMatrixMode(GL_MODELVIEW)
glLightfv(GL_LIGHT0, GL_POSITION, light0_pos)
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse0)
glLightfv(GL_LIGHT0, GL_SPECULAR, specular0)
glLightfv(GL_LIGHT0, GL_AMBIENT, ambient0)
self.renderAxis() #draws 3 axes at origin
for obj in self.sceneObjects:
obj.render()
for obj in self.vizObjects:
obj.render()
self.selectedObjects.render()
def updateViz(self):
if self.gridRadiusViz==0:
vals=[]
for name in self.vizObjectNames:
r=mc.pathToId(name+self.moosepath)
d=float(mc.getField(r,self.variable))
vals.append(d)
inds = digitize(vals,self.stepVals)
for i in range(0,len(self.vizObjects)):
self.vizObjects[i].r,self.vizObjects[i].g,self.vizObjects[i].b=self.colorMap[inds[i]-1]
else:
vals=[]
vals_2=[]
for name in self.vizObjectNames:
r=mc.pathToId(name+self.moosepath)
d=float(mc.getField(r,self.variable))
r2=mc.pathToId(name+self.moosepath_2)
d2=float(mc.getField(r2,self.variable_2))
vals.append(d)
vals_2.append(d2)
inds = digitize(vals,self.stepVals)
inds_2 = digitize(vals_2,self.stepVals_2)
for i in range(0,len(self.vizObjects)):
self.vizObjects[i].r,self.vizObjects[i].g,self.vizObjects[i].b=self.colorMap[inds[i]-1]
self.vizObjects[i].radius=self.indRadius[inds_2[i]-1]
self.updateGL()
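# Editorial sketch of the value-to-colour lookup used in updateViz above;
# it is runnable on its own with numpy, and the numbers are illustrative
# stand-ins for self.stepVals / self.colorMap.
#
#   from numpy import arange, digitize
#   steps = 64
#   stepVals = arange(-0.1, 0.07, (0.07 - (-0.1)) / steps)  # bin edges
#   vals = [-0.07, 0.0, 0.055]          # e.g. sampled membrane potentials
#   inds = digitize(vals, stepVals)     # 1-based bin index per value
#   # colorMap[inds[i] - 1] then yields the RGB triple for vals[i]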
def setSpecificCompartmentName(self,name):
self.specificCompartmentName = name
def drawNewCell(self, cellName, style = 2,cellCentre=[0.0,0.0,0.0],cellAngle=[0.0,0.0,0.0,0.0]):
#***cellName = moosepath in the GL canvas***
an=moose.Neutral(cellName)
all_ch=an.childList #all children
ch = self.get_childrenOfField(all_ch,'Compartment') #compartments only
l_coords = []
for i in range(0,len(ch),1):
x=float(mc.getField(ch[i],'x'))*(1e+04)
y=float(mc.getField(ch[i],'y'))*(1e+04)
z=float(mc.getField(ch[i],'z'))*(1e+04)
x0=float(mc.getField(ch[i],'x0'))*(1e+04)
y0=float(mc.getField(ch[i],'y0'))*(1e+04)
z0=float(mc.getField(ch[i],'z0'))*(1e+04)
d=float(mc.getField(ch[i],'diameter'))*(1e+04)
l_coords.append((x0,y0,z0,x,y,z,d,ch[i].path()))
if self.viz==1: #fix
self.selectionMode=0
if (style==1) or (style==2): #ensures soma is drawn as a sphere
self.specificCompartmentName='soma'
if (self.selectionMode): #self.selectionMode = 1,cells are pickable
newCell = cellStruct(self,l_coords,cellName,style,specificCompartmentName=self.specificCompartmentName)
newCell._centralPos = cellCentre
newCell.rotation = cellAngle
self.sceneObjectNames.append(cellName)
self.sceneObjects.append(newCell)
if self.viz==1: #fix
self.vizObjects.append(newCell)
self.vizObjectNames.append(cellName)
else: #self.selectionMode=0, compartments are pickable
for i in range(0,len(l_coords),1):
if (moose.Compartment(ch[i]).name==self.specificCompartmentName):#drawing of the selected compartment in style 0
if style==0:
compartmentLine=somaDisk(self,l_coords[i],cellName)
compartmentLine._centralPos = cellCentre
compartmentLine.rotation = cellAngle
self.sceneObjectNames.append(l_coords[i][7])
self.sceneObjects.append(compartmentLine)
if self.viz==1:
self.vizObjects.append(compartmentLine)
self.vizObjectNames.append(l_coords[i][7])
elif (style==1) or (style==2): #drawing of the soma in style 1&2
compartmentLine = somaSphere(self,l_coords[i],cellName) #$
elif style==3: #grid view: any chosen compartment is drawn as a disk
compartmentLine=somaDisk(self,[0,0,0,0,0,0,0,l_coords[i][7]],cellName)
compartmentLine.radius = 0.20
compartmentLine._centralPos = cellCentre
compartmentLine.rotation = cellAngle
self.sceneObjectNames.append(l_coords[i][7])
self.sceneObjects.append(compartmentLine)
if self.viz==1:
self.vizObjects.append(compartmentLine)
self.vizObjectNames.append(l_coords[i][7])
else: #to draw compartments other than soma
if style==1:
compartmentLine=cLine(self,l_coords[i],cellName) #$
elif style==2:
compartmentLine=cCylinder(self,l_coords[i],cellName) #$
if (style==1)or(style==2): #necessary; these appends include the soma as well (matches the code in the "$" areas)
compartmentLine._centralPos = cellCentre
compartmentLine.rotation = cellAngle
self.sceneObjectNames.append(l_coords[i][7])
self.sceneObjects.append(compartmentLine)
if self.viz==1:
self.vizObjects.append(compartmentLine)
self.vizObjectNames.append(l_coords[i][7])
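# Editorial usage sketch (hypothetical paths; assumes a MOOSE model with
# Compartment children already loaded under '/cell0' and '/cell1'):
#
#   glwin.drawNewCell('/cell0', style=2)             # soma sphere + cylinders
#   glwin.drawNewCell('/cell1', style=3,
#                     cellCentre=[5.0, 0.0, 0.0])    # grid view: disks only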
def drawAllCells(self, style = 2, cellCentre=[0.0,0.0,0.0], cellAngle=[0.0,0.0,0.0,0.0]):
an=moose.Neutral('/') #moose root children
all_ch=an.childList
#all children under root, of cell type
ch = self.get_childrenOfField(all_ch,'Cell')
for i in range(0,len(ch),1):
self.drawNewCell(moose.Cell(ch[i]).path,style,cellCentre,cellAngle)
nh = self.get_childrenOfField(all_ch,'Neutral') #all cells under all other neutral elements.
for j in range(0,len(nh),1):
an=moose.Neutral(nh[j]) #this neutral element
all_ch=an.childList #all children under this neutral element
ch = self.get_childrenOfField(all_ch,'Cell')
for i in range(0,len(ch),1):
self.drawNewCell(moose.Cell(ch[i]).path,style,cellCentre,cellAngle)
def drawAllCellsUnder(self, path, style = 2, cellCentre=[0.0,0.0,0.0], cellAngle=[0.0,0.0,0.0,0.0]):
pathID = mc.pathToId(path)
if mc.className(pathID) =='Neutral':
an=moose.Neutral(pathID) #this neutral element
all_ch=an.childList #all children under this neutral element
ch = self.get_childrenOfField(all_ch,'Cell')
for i in range(0,len(ch),1):
self.drawNewCell(moose.Cell(ch[i]).path,style,cellCentre,cellAngle)
else:
print 'Select a Neutral element path'
def get_childrenOfField(self,all_ch,field): #'all_ch' is a tuple of moose.id, 'field' is the field to sort with; returns a tuple with valid moose.id's
ch=[]
for i in range(0,len(all_ch)):
if(mc.className(all_ch[i])==field):
ch.append(all_ch[i])
return tuple(ch)
def setColorMap(self,vizMinVal=-0.1,vizMaxVal=0.07,moosepath='',variable='Vm',cMap='jet'):
self.colorMap=[]
self.stepVals=[]
self.moosepath=moosepath
self.variable=variable
if cMap=='': #built-in jet-like ramp; any other value is treated as the path of a pickled colormap
steps = 64
for x in range(0,steps):
r=max((2.0*x)/steps-1,0.0)
b=max((-2.0*x)/steps+1,0.0)
g=min((2.0*x)/steps,(-2.0*x)/steps+2)
self.colorMap.append([r,g,b])
else:
f = open(cMap,'r')
self.colorMap = pickle.load(f)
steps = len(self.colorMap)
f.close()
self.stepVals = arange(vizMinVal,vizMaxVal,(vizMaxVal-vizMinVal)/steps)
def setColorMap_2(self,vizMinVal_2=-0.1,vizMaxVal_2=0.07,moosepath_2='',variable_2='Vm'): #colormap for the radius - grid view case
self.moosepath_2 = moosepath_2
self.variable_2 = variable_2
self.stepVals_2 = arange(vizMinVal_2,vizMaxVal_2,(vizMaxVal_2-vizMinVal_2)/30) #assigned a default of 30 steps
self.indRadius = arange(0.05,0.20,0.005) #radius equivalent colormap
class newGLWindow(QtGui.QMainWindow):
def __init__(self, parent = None):
# initialization of the superclass
super(newGLWindow, self).__init__(parent)
# setup the GUI --> function generated by pyuic4
self.name = 'GL Window '
#self.setupUi(self)
def windowTitle(self,name):
self.name = name
#MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", self.name, None, QtGui.QApplication.UnicodeUTF8))
self.setupUi(self)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(500, 500)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(MainWindow)
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.mgl = updatepaintGL(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mgl.sizePolicy().hasHeightForWidth())
self.mgl.setSizePolicy(sizePolicy)
self.mgl.setObjectName("mgl")
self.horizontalLayout.addWidget(self.mgl)
MainWindow.setCentralWidget(self.centralwidget)
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", self.name, None, QtGui.QApplication.UnicodeUTF8))
class newGLSubWindow(QtGui.QMdiSubWindow):
"""This is to customize MDI sub window for our purpose.
In particular, we don't want anything to be deleted when the window is closed.
"""
def __init__(self, *args):
QtGui.QMdiSubWindow.__init__(self, *args)
def closeEvent(self, event):
self.emit(QtCore.SIGNAL('subWindowClosed()'))
self.hide()
|
BhallaLab/moose-thalamocortical
|
pymoose/gui/qt/updatepaintGL.py
|
Python
|
lgpl-2.1
| 11,475
|
[
"MOOSE"
] |
7ce84a8cf63f9f5e380259906b80abda2c5bcd98733fda060d0cc6ba419e45fd
|
from simphony.core import CUBA
def create_material_to_atom_type_map(state_data):
""" Creates a map from material type ui to atom type
create a mapping from material-uids to atom_type based
on the materials given in SD. which goes from 1 to N in lammps
Parameters:
-----------
state_data : StateData
state data with information on materials
"""
material_to_atom = {}
number_atom_types = 1
for material in state_data.iter(item_type=CUBA.MATERIAL):
material_to_atom[material.uid] = number_atom_types
number_atom_types += 1
return material_to_atom
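# Editorial usage sketch; ``sd`` is a hypothetical StateData holding two
# CUBA.MATERIAL items, and iteration order determines the numbering:
#
#   mapping = create_material_to_atom_type_map(sd)
#   # -> {<uid of first material>: 1, <uid of second material>: 2}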
|
simphony/simphony-lammps-md
|
simlammps/common/utils.py
|
Python
|
bsd-2-clause
| 618
|
[
"LAMMPS"
] |
caa24730f106cee5540f59489c800ed8f80aaf9351fd358c4f7e0b28637f9ace
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeoRecord.geo_id_segments'
#db.add_column(u'profiles_georecord', 'geo_id_segments', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'FlatValue.geography_geo_key'
db.add_column(u'profiles_flatvalue', 'geography_geo_key', self.gf('django.db.models.fields.CharField')(default=0, max_length='255', db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'GeoRecord.geo_id_segments'
#db.delete_column(u'profiles_georecord', 'geo_id_segments')
# Deleting field 'FlatValue.geography_geo_key'
db.delete_column(u'profiles_flatvalue', 'geography_geo_key')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 20, 11, 16, 23, 222847)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 20, 11, 16, 23, 222344)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.DataDomainIndex']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_geo_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_id_segments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
216software/Profiles
|
communityprofiles/profiles/oldmigrations/0070_auto__add_field_georecord_geo_id_segments__add_field_flatvalue_geograp.py
|
Python
|
mit
| 23,849
|
[
"MOE"
] |
5e21a80105a0985a3d4a5cde3c318e629704643a9017ae95a28c4bb6b2215ed6
|
import unittest as ut
import tests_common
import os
import numpy as np
import espressomd
from espressomd import lb
from espressomd import integrate
if "ENGINE" in espressomd.features() and "LB" in espressomd.features():
class SwimmerTest(ut.TestCase):
def test(self):
# Set to true if you need a new
# comparison configuration
new_configuration = False
boxl = 12
sampsteps = 2000
tstep = 0.01
temp = 0.0
S = espressomd.System()
if (S.n_nodes > 1):
print("NOTE: Ignoring testcase for n_nodes > 1")
return
S.box_l = [boxl, boxl, boxl]
S.skin = 0.1
S.time_step = tstep
S.part.add(id=0, pos=[6.0,3.0,2.0],
swimming={"mode": "pusher", "v_swim": 0.10, "dipole_length": 1.0, "rotational_friction": 2.0},
quat=[np.sqrt(.5),np.sqrt(.5), 0, 0])
S.part.add(id=1, pos=[2.0,3.0,6.0],
swimming={"mode": "pusher", "f_swim": 0.03, "dipole_length": 2.0, "rotational_friction": 20.0},
quat=[np.sqrt(.5), 0,np.sqrt(.5), 0])
S.part.add(id=2, pos=[3.0,2.0,6.0],
swimming={"mode": "puller", "v_swim": 0.15, "dipole_length": 0.5, "rotational_friction": 15.0},
quat=[np.sqrt(.5), 0, 0,np.sqrt(.5)])
S.part.add(id=3, pos=[3.0,6.0,2.0],
swimming={"mode": "puller", "f_swim": 0.05, "dipole_length": 1.5, "rotational_friction": 6.0},
quat=[ 0, 0,np.sqrt(.5),np.sqrt(.5)])
lbm = lb.LBFluid(agrid=1.0, tau=tstep, fric=0.5, visc=1.0, dens=1.0)
S.actors.add(lbm)
#thermostat lb $temp
integrate.integrate(sampsteps)
if new_configuration:
lbm.print_vtk_velocity("engine_lb.vtk")
self.assertTrue( True )
else:
lbm.print_vtk_velocity("engine_lb_tmp.vtk")
different, difference = tests_common.calculate_vtk_max_pointwise_difference("engine_lb.vtk", "engine_lb_tmp.vtk",tol=2.0e-7)
os.remove("engine_lb_tmp.vtk")
print("Maximum deviation to the reference point is: {}".format(difference))
self.assertTrue( different )
if __name__ == '__main__':
print("Features: ", espressomd.features())
ut.main()
|
tbereau/espresso
|
testsuite/python/engine_lb.py
|
Python
|
gpl-3.0
| 2,570
|
[
"VTK"
] |
38e3a9d97c27510f207f5838ab62d7d09ff636f2b0f6d11e0fbd85ddc53cd22d
|
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
from __future__ import absolute_import
from datetime import datetime
import json
from time import time
import types
import unittest
import normalize.exc as exc
from normalize.coll import list_of
from normalize.record import Record
from normalize.visitor import VisitorPattern
from testclasses import acent
from testclasses import acent_attributes
from testclasses import JsonStarList
from testclasses import maia
from testclasses import NamedStarList
from testclasses import PullRequest
from testclasses import StarList
from testclasses import StarSystem
from testclasses import Wall
from testclasses import wall_one
JSON_CAN_DUMP = (basestring, int, float, long, dict, list, types.NoneType)
class SimpleDumper(VisitorPattern):
@classmethod
def apply(self, value, *args):
if isinstance(value, JSON_CAN_DUMP):
dumpable = value
elif isinstance(value, datetime):
dumpable = value.isoformat()
else:
raise Exception("Can't dump %r" % value)
return dumpable
class AssertDiffTest(unittest.TestCase):
def assertDiffs(self, a, b, expected, **kwargs):
differences = set(str(x) for x in a.diff(b, **kwargs))
self.assertEqual(
differences,
set("<DiffInfo: %s>" % x for x in expected)
)
class TestVisitor(AssertDiffTest):
def setUp(self):
self.acent_json_data = {
'name': 'Alpha Centauri',
'components': [{'hip_id': 71683, 'name': 'Alpha Centauri A'},
{'hip_id': 71681, 'name': 'Alpha Centauri B'},
{'hip_id': 70890, 'name': 'Alpha Centauri C'}],
'attributes': acent_attributes,
}
self.nsl_json_data = {
'name': 'Alpha Centauri',
'values': self.acent_json_data['components']
}
def test_simple_dumper(self):
dumpable = SimpleDumper.visit(wall_one)
self.assertIsInstance(dumpable['posts'][0], dict)
self.assertEqual(dumpable['posts'][0]['edited'], "2001-09-09T01:46:40")
json.dumps(dumpable) # assert doesn't throw
wall_roundtripped = SimpleDumper.cast(Wall, dumpable)
self.assertDiffs(wall_one, wall_roundtripped, {})
self.assertDiffs(wall_one, Wall(dumpable), {})
def test_intro_example_dump(self):
dumped = SimpleDumper.visit(acent)
self.assertEqual(dumped, self.acent_json_data)
def test_intro_example_cast(self):
self.assertDiffs(acent, StarSystem(self.acent_json_data), {})
self.assertDiffs(
acent, SimpleDumper.cast(StarSystem, self.acent_json_data),
{},
)
def test_complex_dump(self):
nsl = NamedStarList(acent.components)
nsl.name = "Alpha Centauri"
dumped = SimpleDumper.visit(nsl)
self.assertEqual(dumped, self.nsl_json_data)
def test_complex_dump2(self):
dumped = SimpleDumper.visit(maia)
maia2 = SimpleDumper.cast(type(maia), dumped)
self.assertEqual(maia.diff(maia2), [])
self.assertEqual(maia2.coordinates['ICRS'][2], "49.60656")
self.assertEqual(maia2.designations['HR'], "1149")
def test_complex_cast(self):
nsl = NamedStarList(**(self.nsl_json_data))
self.assertDiffs(
nsl, SimpleDumper.cast(NamedStarList, self.nsl_json_data),
{},
)
def test_dump_types(self):
typeinfo = SimpleDumper.reflect(NamedStarList)
self.assertEqual(
typeinfo['itemtype']['properties']['hip_id']['type'],
'int',
)
typeinfo = SimpleDumper.reflect(Wall)
self.assertEqual(typeinfo['properties']['owner']['name'], 'Person')
self.assertEqual(
typeinfo['properties']['owner']['properties']['interests']['type'],
'list',
)
def test_json_dump(self):
plain_list = StarList(self.acent_json_data['components'])
json_list = JsonStarList(self.acent_json_data['components'])
plain_dumped = SimpleDumper.visit(plain_list)
json_dumped = SimpleDumper.visit(json_list)
self.assertEqual(plain_dumped, json_dumped)
def test_cast_garbage(self):
for garbage in (
"green cheese", [], (),
{'values': {"foo": "bar"}},
self.acent_json_data['components'],
):
with self.assertRaises(exc.VisitorGrokRecordError):
SimpleDumper.cast(NamedStarList, garbage)
def test_cast_complex_filtered(self):
# this works because the properties are filtered out; normally this
# filtering would be due to 'extraneous' property settings.
# MultiFieldSelector doesn't currently distinguish between 'None' =>
# all items in collection vs 'None' => all, so use a filter which
# mentions each of the items in the set.
nsl = SimpleDumper.cast(
NamedStarList,
self.acent_json_data['components'],
visit_filter=tuple([x, 'hip_id'] for x in range(0, 3)),
)
self.assertEqual(len(nsl), 3)
def test_visit_complex_filtered(self):
nsl = NamedStarList(**(self.nsl_json_data))
visited = SimpleDumper.visit(
nsl, filter=tuple([x, 'hip_id'] for x in range(0, 3)),
)
self.assertEqual(
visited, list(
{'hip_id': x['hip_id']} for x in
self.acent_json_data['components']
),
)
class TestTypeUnionCases(AssertDiffTest):
def setUp(self):
self.open_pr = PullRequest(number=123, merged_at=None)
self.closed_pr = PullRequest(
number=456,
merged_at=datetime.fromtimestamp(time() - 20 * 86400),
)
def test_type_union_dump(self):
dumped = SimpleDumper.visit(self.open_pr, ignore_none=False)
self.assertIn("created_at", dumped)
self.assertRegexpMatches(
dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
)
self.assertEqual(dumped['merged_at'], None)
dumped = SimpleDumper.visit(self.closed_pr)
self.assertRegexpMatches(
dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
)
self.assertIn("created_at", dumped)
self.assertIn('merged_at', dumped)
def test_type_union_load(self):
pr_dict = {
"number": "5125",
"created_at": "2014-07-23T12:34:56Z",
"merged_at": None,
}
my_pr = PullRequest(pr_dict)
pr_2 = SimpleDumper.cast(PullRequest, pr_dict, ignore_none=False)
self.assertDiffs(my_pr, pr_2, {})
def test_type_union_typeinfo(self):
schema = SimpleDumper.reflect(PullRequest)
self.assertEqual(schema['properties']['merged_at']['type'],
["datetime", "NoneType"])
def test_cast_collection(self):
RecordList = list_of(Record)
casted = VisitorPattern.cast(RecordList, [{}, {}])
self.assertIsInstance(casted[0], Record)
self.assertIsInstance(casted, RecordList)
empty_casted = VisitorPattern.cast(RecordList, [])
self.assertIsInstance(empty_casted, RecordList)
|
samv/normalize
|
tests/test_visitor.py
|
Python
|
mit
| 7,772
|
[
"VisIt"
] |
654adf9958af3ead55209e2c9b6163a6f2a68e8a0dfcbcd24ed7ef08e9a6d537
|
import pandas as pd
import time
import random
import numpy as np
from datetime import timedelta
from datetime import datetime
import MAUC
import argparse
parser = argparse.ArgumentParser(usage='python3 evalOneSubmission.py',
description=r'''
TADPOLE Evaluation Script:
The program computes the following matrics:
Clinical diagnosis prediction:
1. Multiclass area under the receiver operating curve (mAUC)
2. Balanced classification accuracy (BCA)
Continuous feature predictions:
3. Mean Absolute Error (MAE)
4. Coverage Probability Accuracy (CPA)
5. Weighted Error Score (WES)
Author: Razvan V. Marinescu, razvan.marinescu.14@ucl.ac.uk
''')
def calcBCA(estimLabels, trueLabels, nrClasses):
# Balanced Classification Accuracy
bcaAll = []
for c0 in range(nrClasses):
for c1 in range(c0+1,nrClasses):
# c0 = positive class & c1 = negative class
TP = np.sum((estimLabels == c0) & (trueLabels == c0))
TN = np.sum((estimLabels == c1) & (trueLabels == c1))
FP = np.sum((estimLabels == c1) & (trueLabels == c0))
FN = np.sum((estimLabels == c0) & (trueLabels == c1))
# sometimes the sensitivity or specificity can be NaN, if the user doesn't forecast one of the classes.
# In this case we assume a default value for sensitivity/specificity
if (TP+FN) == 0:
sensitivity = 0.5
else:
sensitivity = TP/(TP+FN)
if (TN+FP) == 0:
specificity = 0.5
else:
specificity = TN/(TN+FP)
bcaCurr = 0.5*(sensitivity+specificity)
bcaAll += [bcaCurr]
# print('bcaCurr %f TP %f TN %f FP %f FN %f' % (bcaCurr, TP, TN, FP, FN))
return np.mean(bcaAll)
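# Editorial worked example: with estimLabels = np.array([0, 0, 1, 1]) and
# trueLabels = np.array([0, 1, 0, 1]) over nrClasses = 2, the single class
# pair (c0=0, c1=1) gives TP = TN = FP = FN = 1, so sensitivity =
# specificity = 0.5 and calcBCA returns 0.5, i.e. chance level.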
def parseData(d4Df, forecastDf, diagLabels):
trueDiag = d4Df['Diagnosis']
trueADAS = d4Df['ADAS13']
trueVents = d4Df['Ventricles']
nrSubj = d4Df.shape[0]
zipTrueLabelAndProbs = []
hardEstimClass = -1 * np.ones(nrSubj, int)
adasEstim = -1 * np.ones(nrSubj, float)
adasEstimLo = -1 * np.ones(nrSubj, float) # lower margin
adasEstimUp = -1 * np.ones(nrSubj, float) # upper margin
ventriclesEstim = -1 * np.ones(nrSubj, float)
ventriclesEstimLo = -1 * np.ones(nrSubj, float) # lower margin
ventriclesEstimUp = -1 * np.ones(nrSubj, float) # upper margin
# print('subDf.keys()', forecastDf['Forecast Date'])
invalidResultReturn = (None,None,None,None,None,None,None,None,None,None,None)
invalidFlag = False
# for each subject in D4 match the closest user forecasts
for s in range(nrSubj):
currSubjMask = d4Df['RID'].iloc[s] == forecastDf['RID']
currSubjData = forecastDf[currSubjMask]
# if subject is missing
if currSubjData.shape[0] == 0:
print('WARNING: Subject RID %s missing from user forecasts' % d4Df['RID'].iloc[s])
invalidFlag = True
continue
# if not all forecast months are present
if currSubjData.shape[0] < 5*12: # check if at least 5 years worth of forecasts exist
print('WARNING: Missing forecast months for subject with RID %s' % d4Df['RID'].iloc[s])
invalidFlag = True
continue
currSubjData = currSubjData.reset_index(drop=True)
timeDiffsScanCog = [d4Df['CognitiveAssessmentDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
# print('Forecast Date 2',currSubjData['Forecast Date'])
indexMin = np.argsort(np.abs(timeDiffsScanCog))[0]
# print('timeDiffsScanMri', indexMin, timeDiffsScanMri)
pCN = currSubjData['CN relative probability'].iloc[indexMin]
pMCI = currSubjData['MCI relative probability'].iloc[indexMin]
pAD = currSubjData['AD relative probability'].iloc[indexMin]
# normalise the relative probabilities; dividing by their mean scales them
# to average 1 (rather than sum to 1), which leaves the class ranking intact
pSum = (pCN + pMCI + pAD)/3
pCN /= pSum
pMCI /= pSum
pAD /= pSum
hardEstimClass[s] = np.argmax([pCN, pMCI, pAD])
adasEstim[s] = currSubjData['ADAS13'].iloc[indexMin]
adasEstimLo[s] = currSubjData['ADAS13 50% CI lower'].iloc[indexMin]
adasEstimUp[s] = currSubjData['ADAS13 50% CI upper'].iloc[indexMin]
# for the mri scan find the forecast closest to the scan date,
# which might be different from the cognitive assessment date
timeDiffsScanMri = [d4Df['ScanDate'].iloc[s] - d for d in currSubjData['Forecast Date']]
indexMinMri = np.argsort(np.abs(timeDiffsScanMri))[0]
ventriclesEstim[s] = currSubjData['Ventricles_ICV'].iloc[indexMinMri]
ventriclesEstimLo[s] = currSubjData['Ventricles_ICV 50% CI lower'].iloc[indexMinMri]
ventriclesEstimUp[s] = currSubjData['Ventricles_ICV 50% CI upper'].iloc[indexMinMri]
# print('%d probs' % d4Df['RID'].iloc[s], pCN, pMCI, pAD)
if not np.isnan(trueDiag.iloc[s]):
zipTrueLabelAndProbs += [(trueDiag.iloc[s], [pCN, pMCI, pAD])]
if invalidFlag:
# if at least one subject was missing or had incomplete forecasts
raise ValueError('Submission was incomplete. Please resubmit')
# If there are NaNs in D4, filter them out along with the corresponding user forecasts.
# This can happen if rollover subjects don't come in for a visit in ADNI3.
notNanMaskDiag = np.logical_not(np.isnan(trueDiag))
trueDiagFilt = trueDiag[notNanMaskDiag]
hardEstimClassFilt = hardEstimClass[notNanMaskDiag]
notNanMaskADAS = np.logical_not(np.isnan(trueADAS))
trueADASFilt = trueADAS[notNanMaskADAS]
adasEstim = adasEstim[notNanMaskADAS]
adasEstimLo = adasEstimLo[notNanMaskADAS]
adasEstimUp = adasEstimUp[notNanMaskADAS]
notNanMaskVents = np.logical_not(np.isnan(trueVents))
trueVentsFilt = trueVents[notNanMaskVents]
ventriclesEstim = ventriclesEstim[notNanMaskVents]
ventriclesEstimLo = ventriclesEstimLo[notNanMaskVents]
ventriclesEstimUp = ventriclesEstimUp[notNanMaskVents]
assert trueDiagFilt.shape[0] == hardEstimClassFilt.shape[0]
assert trueADASFilt.shape[0] == adasEstim.shape[0] == adasEstimLo.shape[0] == adasEstimUp.shape[0]
assert trueVentsFilt.shape[0] == ventriclesEstim.shape[0] == \
ventriclesEstimLo.shape[0] == ventriclesEstimUp.shape[0]
return zipTrueLabelAndProbs, hardEstimClassFilt, adasEstim, adasEstimLo, adasEstimUp, \
ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt
def evalOneSub(d4Df, forecastDf):
"""
Evaluates one submission.
Parameters
----------
d4Df - Pandas data frame containing the D4 dataset
forecastDf - Pandas data frame containing user forecasts for D2 subjects.
Returns
-------
mAUC - multiclass Area Under Curve
bca - balanced classification accuracy
adasMAE - ADAS13 Mean Absolute Error
ventsMAE - Ventricles Mean Absolute Error
adasWES - ADAS13 Weighted Error Score
ventsWES - Ventricles Weighted Error Score
adasCPA - ADAS13 Coverage Probability Accuracy for the 50% confidence interval
ventsCPA - Ventricles Coverage Probability Accuracy for the 50% confidence interval
"""
forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # treats every monthly estimate as falling on the first day of that month, e.g. 2017-01 -> 2017-01-01
if isinstance(d4Df['Diagnosis'].iloc[0], str):
d4Df['CognitiveAssessmentDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['CognitiveAssessmentDate']]
d4Df['ScanDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['ScanDate']]
mapping = {'CN' : 0, 'MCI' : 1, 'AD' : 2}
d4Df.replace({'Diagnosis':mapping}, inplace=True)
diagLabels = ['CN', 'MCI', 'AD']
zipTrueLabelAndProbs, hardEstimClass, adasEstim, adasEstimLo, adasEstimUp, \
ventriclesEstim, ventriclesEstimLo, ventriclesEstimUp, trueDiagFilt, trueADASFilt, trueVentsFilt = \
parseData(d4Df, forecastDf, diagLabels)
zipTrueLabelAndProbs = list(zipTrueLabelAndProbs)
########## compute metrics for the clinical status #############
##### Multiclass AUC (mAUC) #####
nrClasses = len(diagLabels)
mAUC = MAUC.MAUC(zipTrueLabelAndProbs, num_classes=nrClasses)
### Balanced Classification Accuracy (BCA) ###
# print('hardEstimClass', np.unique(hardEstimClass), hardEstimClass)
trueDiagFilt = trueDiagFilt.astype(int)
# print('trueDiagFilt', np.unique(trueDiagFilt), trueDiagFilt)
bca = calcBCA(hardEstimClass, trueDiagFilt, nrClasses=nrClasses)
####### compute metrics for Ventricles and ADAS13 ##########
#### Mean Absolute Error (MAE) #####
adasMAE = np.mean(np.abs(adasEstim - trueADASFilt))
ventsMAE = np.mean(np.abs(ventriclesEstim - trueVentsFilt))
##### Weighted Error Score (WES) ####
adasCoeffs = 1/(adasEstimUp - adasEstimLo)
adasWES = np.sum(adasCoeffs * np.abs(adasEstim - trueADASFilt))/np.sum(adasCoeffs)
ventsCoeffs = 1/(ventriclesEstimUp - ventriclesEstimLo)
ventsWES = np.sum(ventsCoeffs * np.abs(ventriclesEstim - trueVentsFilt))/np.sum(ventsCoeffs)
#### Coverage Probability Accuracy (CPA) ####
adasCovProb = np.sum((adasEstimLo < trueADASFilt) &
(adasEstimUp > trueADASFilt))/trueADASFilt.shape[0]
adasCPA = np.abs(adasCovProb - 0.5)
ventsCovProb = np.sum((ventriclesEstimLo < trueVentsFilt) &
(ventriclesEstimUp > trueVentsFilt))/trueVentsFilt.shape[0]
ventsCPA = np.abs(ventsCovProb - 0.5)
return mAUC, bca, adasMAE, ventsMAE, adasWES, ventsWES, adasCPA, ventsCPA
if __name__ == "__main__":
parser.add_argument('--d4File', dest='d4File', help='CSV file containing the D4 dataset. '\
'Needs to be in the same format of D4_dummy.csv')
parser.add_argument('--forecastFile', dest='forecastFile', help='CSV file containing the user '
'forecasts for subjects in D2. Needs to be in the same format as '
'TADPOLE_Submission_TeamName1.xlsx or TADPOLE_Submission_Leaderboard_TeamName1.csv')
parser.add_argument('--leaderboard', action='store_true', help='pass this flag if the submission is a leaderboard submission. It ensures the filename is in the right format')
args = parser.parse_args()
d4File = args.d4File
forecastFile = args.forecastFile
forecastFileShort = forecastFile.split('/')[-1]
if args.leaderboard:
if (not forecastFileShort.startswith('TADPOLE_Submission_Pycon_')) or (not forecastFileShort.endswith('.csv')):
raise ValueError('File %s is not in the correct format: ' % forecastFileShort +
'TADPOLE_Submission_Pycon_TeamName.csv')
else:
if (not forecastFileShort.startswith('TADPOLE_Submission_')) or (not forecastFileShort.endswith('.csv')):
raise ValueError('File %s is not in the correct format: TADPOLE_Submission_TeamName.csv.' % forecastFileShort)
if 'TeamName' in forecastFileShort:
raise ValueError(r'''
Wrong file name! First rename the submission file
''' + forecastFileShort + r'''
to
TADPOLE_Submission_Pycon_<YourTeamName><Index>.csv
Examples:
TADPOLE_Submission_Pycon_PyHackers1.csv (first submission)
TADPOLE_Submission_Pycon_PowerRangers3.csv (third submission)
''')
d4Df = pd.read_csv(d4File)
subDf = pd.read_csv(forecastFile)
# don't catch the exception here, as this main function is used to test if the submission is correct
mAUC, bca, adasMAE, ventsMAE, adasWES, ventsWES, adasCPA, ventsCPA = \
evalOneSub(d4Df, subDf)
print('########### Metrics for clinical status ##################')
print('mAUC', mAUC)
print('bca', bca)
print('\n########### Mean Absolute Error (MAE) ##################')
print('adasMAE', adasMAE, 'ventsMAE', ventsMAE)
print('\n########### Weighted Error Score (WES) ##################')
print('adasWES', adasWES, 'ventsWES', ventsWES)
print('\n########### Coverage Probability Accuracy ##################')
print('adasCPA', adasCPA, 'ventsCPA', ventsCPA)
print('\n\n########### File is ready for submission to TADPOLE ###########')
|
swhustla/pycon2017-alzheimers-hack
|
notebooks/evalOneSubmission.py
|
Python
|
mit
| 11,554
|
[
"VisIt"
] |
08296a40264388a9a7cbff2a01c93f779e631ca937010a38e50b8495e26fbeec
|
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import HTMLParser
from cStringIO import StringIO
import htmlentitydefs
import logging
import robotparser
import socket
import time
import _sgmllib_copy as sgmllib
from _urllib2_fork import HTTPError, BaseHandler
from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import response_seek_wrapper
import _rfc3986
import _sockettimeout
debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug
# monkeypatch urllib2.HTTPError to show URL
## import urllib2
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
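# Editorial example: for a response whose body begins with
#   <html><head><meta http-equiv="Refresh" content="5; url=/next">
# parse_head(fileobj, HeadParser()) returns [('Refresh', '5; url=/next')],
# which HTTPEquivProcessor below folds into the regular HTTP headers.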
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT
def set_opener(self, opener=None):
import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def set_timeout(self, timeout):
self._timeout = timeout
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False,
timeout=self._timeout)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
debug_robots("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
debug_robots("disallow all")
elif status >= 400:
self.allow_all = True
debug_robots("allow all")
elif status == 200 and lines:
debug_robots("parse lines")
self.parse(lines)
class RobotExclusionError(HTTPError):
def __init__(self, request, *args):
        HTTPError.__init__(self, *args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
    except ImportError:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.set_timeout(request.timeout)
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
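# Usage sketch (illustrative assumption): with HTTPRobotRulesProcessor in the
# handler chain, a fetch disallowed by the site's robots.txt raises
# RobotExclusionError instead of returning a response.
#
#     import mechanize
#     opener = mechanize.build_opener(HTTPRobotRulesProcessor())
#     try:
#         opener.open("http://example.com/private/")
#     except RobotExclusionError:
#         pass  # disallowed by robots.txt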
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
    This only makes sense if you use each HTTPRefererProcessor for a single
    chain of requests (so, for example, if you use a single
    HTTPRefererProcessor to fetch a series of URLs extracted from a single
    page, this will break, because each response overwrites the stored referer).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
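# Usage sketch (illustrative assumption): one HTTPRefererProcessor per chain
# of requests, so that each fetch sends the URL of the previous response as
# its Referer header.
#
#     import mechanize
#     opener = mechanize.build_opener(HTTPRefererProcessor())
#     opener.open("http://example.com/index.html")  # no Referer yet
#     opener.open("http://example.com/next.html")   # Referer: .../index.html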
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
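# Usage sketch (illustrative assumption): allow Refresh redirections with
# pauses of up to 10 seconds, but skip the actual sleep in favour of an
# immediate redirection.
#
#     import mechanize
#     opener = mechanize.build_opener(
#         HTTPRefreshProcessor(max_time=10, honor_time=False))
#     response = opener.open("http://example.com/refreshing-page")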
|
odicraig/kodi2odi
|
addons/plugin.video.roggerstream-4.0.0/mechanize/_http.py
|
Python
|
gpl-3.0
| 14,801
|
[
"VisIt"
] |
ecd0e7b4cef73bcf1cfaf4835b3acf0346996445e261d48a8285e89facf94e26
|
########################################################################
# $HeadURL $
# File: FTSAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/31 10:00:13
########################################################################
""" :mod: FTSAgent
==============
.. module: FTSAgent
:synopsis: agent propagating scheduled RMS request in FTS
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
DIRAC agent propagating scheduled RMS request in FTS
Request processing phases (each in a separate thread):
1. MONITOR
...active FTSJobs, prepare FTSFiles dictionary with files to submit, fail, register and reschedule
2. CHECK REPLICAS
 ...check, just in case, whether all transfers are already done; if so, end processing
3. FAILED FILES:
...if at least one Failed FTSFile is found, set Request.Operation.File to 'Failed', end processing
 4. UPDATE Waiting#TargetSE FTSFiles
...if any found in FTSDB
5. REGISTER REPLICA
...insert RegisterReplica operation to request, if some FTSFiles failed to register, end processing
6. RESCHEDULE FILES
...for FTSFiles failed with missing sources error
7. SUBMIT
...but read 'Waiting' FTSFiles first from FTSDB and merge those with FTSFiles to retry
"""
__RCSID__ = "$Id: $"
# #
# @file FTSAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/31 10:00:51
# @brief Definition of FTSAgent class.
# # imports
import time
import datetime
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
# # from CS
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
# # from Core
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.Core.Utilities.List import breakListIntoChunks
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.private.FTSPlacement import FTSPlacement
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # from RMS
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
# # from RSS
# #from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # from Resources
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
# # from Accounting
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection
# # agent base name
AGENT_NAME = "DataManagement/FTSAgent"
class escapeTry( Exception ):
pass
########################################################################
class FTSAgent( AgentModule ):
"""
.. class:: FTSAgent
Agent propagating Scheduled request to Done or Failed state in the FTS system.
Requests and associated FTSJobs (and so FTSFiles) are kept in cache.
"""
# # fts placement refresh in seconds
FTSPLACEMENT_REFRESH = FTSHistoryView.INTERVAL / 2
# # placeholder for max job per channel
MAX_ACTIVE_JOBS = 50
# # min threads
MIN_THREADS = 1
# # max threads
MAX_THREADS = 10
# # files per job
MAX_FILES_PER_JOB = 100
# # MAX FTS transfer per FTSFile
MAX_ATTEMPT = 256
# # stage flag
PIN_TIME = 0
# # FTS submission command
SUBMIT_COMMAND = 'glite-transfer-submit'
# # FTS monitoring command
MONITOR_COMMAND = 'glite-transfer-status'
# Max number of requests fetched from the RMS
MAX_REQUESTS = 100
# # placeholder for FTS client
__ftsClient = None
# # placeholder for the FTS version
__ftsVersion = None
# # placeholder for request client
__requestClient = None
# # placeholder for resources helper
__resources = None
# # placeholder for RSS client
__rssClient = None
# # placeholder for FTSPlacement
__ftsPlacement = None
# # placement regeneration time delta
__ftsPlacementValidStamp = None
# # placeholder for threadPool
__threadPool = None
# # update lock
__updateLock = None
# # request cache
__reqCache = dict()
def updateLock( self ):
""" update lock """
if not self.__updateLock:
self.__updateLock = LockRing().getLock( "FTSAgentLock" )
return self.__updateLock
@classmethod
def requestClient( cls ):
""" request client getter """
if not cls.__requestClient:
cls.__requestClient = ReqClient()
return cls.__requestClient
@classmethod
def ftsClient( cls ):
""" FTS client """
if not cls.__ftsClient:
cls.__ftsClient = FTSClient()
return cls.__ftsClient
@classmethod
def rssClient( cls ):
""" RSS client getter """
if not cls.__rssClient:
cls.__rssClient = ResourceStatus()
return cls.__rssClient
@classmethod
def getRequest( cls, reqID ):
""" get Requests systematically and refresh cache """
getRequest = cls.requestClient().getRequest( reqID )
if not getRequest["OK"]:
cls.__reqCache.pop( reqID, None )
return getRequest
getRequest = getRequest["Value"]
if not getRequest:
cls.__reqCache.pop( reqID, None )
return S_ERROR( "request of id '%s' not found in ReqDB" % reqID )
cls.__reqCache[reqID] = getRequest
return S_OK( cls.__reqCache[reqID] )
@classmethod
def putRequest( cls, request, clearCache = True ):
""" put request back to ReqDB
:param Request request: Request instance
:param bool clearCache: clear the cache?
also finalize request if status == Done
"""
# # put back request
if request.RequestID not in cls.__reqCache:
return S_OK()
put = cls.requestClient().putRequest( request )
if not put["OK"]:
return put
# # finalize first if possible
if request.Status == "Done" and request.JobID:
finalizeRequest = cls.requestClient().finalizeRequest( request.RequestID, request.JobID )
if not finalizeRequest["OK"]:
request.Status = "Scheduled"
# # del request from cache if needed
if clearCache:
cls.__reqCache.pop( request.RequestID, None )
return S_OK()
@classmethod
def putFTSJobs( cls, ftsJobsList ):
""" put back fts jobs to the FTSDB """
for ftsJob in ftsJobsList:
put = cls.ftsClient().putFTSJob( ftsJob )
if not put["OK"]:
return put
return S_OK()
@staticmethod
def updateFTSFileDict( ftsFilesDict, toUpdateDict ):
""" update :ftsFilesDict: with FTSFiles in :toUpdateDict: """
for category, ftsFileList in ftsFilesDict.items():
for ftsFile in toUpdateDict.get( category, [] ):
if ftsFile not in ftsFileList:
ftsFileList.append( ftsFile )
return ftsFilesDict
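  # Merge-semantics sketch for updateFTSFileDict (illustration only; fileA and
  # fileB are hypothetical FTSFile instances):
  #   ftsFilesDict = { "toSubmit": [ fileA ], "toFail": [] }
  #   toUpdateDict = { "toSubmit": [ fileA, fileB ] }
  #   result       = { "toSubmit": [ fileA, fileB ], "toFail": [] }  # fileA not duplicated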
# def resources( self ):
# """ resource helper getter """
# if not self.__resources:
# self.__resources = Resources()
# return self.__resources
def threadPool( self ):
""" thread pool getter """
if not self.__threadPool:
self.__threadPool = ThreadPool( self.MIN_THREADS, self.MAX_THREADS )
self.__threadPool.daemonize()
return self.__threadPool
def resetFTSPlacement( self ):
""" create fts Placement """
ftsHistory = self.ftsClient().getFTSHistory()
if not ftsHistory["OK"]:
self.log.error( "unable to get FTS history:", ftsHistory["Message"] )
return ftsHistory
ftsHistory = ftsHistory["Value"]
try:
self.updateLock().acquire()
if not self.__ftsPlacement:
self.__ftsPlacement = FTSPlacement( csPath = None, ftsHistoryViews = ftsHistory )
else:
self.__ftsPlacement.refresh( ftsHistoryViews = ftsHistory )
finally:
self.updateLock().release()
# # save time stamp
self.__ftsPlacementValidStamp = datetime.datetime.now() + datetime.timedelta( seconds = self.FTSPLACEMENT_REFRESH )
return S_OK()
def initialize( self ):
""" agent's initialization """
# # data manager
self.dataManager = DataManager()
log = self.log.getSubLogger( "initialize" )
self.FTSPLACEMENT_REFRESH = self.am_getOption( "FTSPlacementValidityPeriod", self.FTSPLACEMENT_REFRESH )
log.info( "FTSPlacement validity period = %s s" % self.FTSPLACEMENT_REFRESH )
self.SUBMIT_COMMAND = self.am_getOption( "SubmitCommand", self.SUBMIT_COMMAND )
log.info( "FTS submit command = %s" % self.SUBMIT_COMMAND )
self.MONITOR_COMMAND = self.am_getOption( "MonitorCommand", self.MONITOR_COMMAND )
log.info( "FTS commands: submit = %s monitor %s" % ( self.SUBMIT_COMMAND, self.MONITOR_COMMAND ) )
self.PIN_TIME = self.am_getOption( "PinTime", self.PIN_TIME )
log.info( "Stage files before submission = ", {True: "yes", False: "no"}[bool( self.PIN_TIME )] )
self.MAX_ACTIVE_JOBS = self.am_getOption( "MaxActiveJobsPerRoute", self.MAX_ACTIVE_JOBS )
log.info( "Max active FTSJobs/route = ", str( self.MAX_ACTIVE_JOBS ) )
self.MAX_FILES_PER_JOB = self.am_getOption( "MaxFilesPerJob", self.MAX_FILES_PER_JOB )
log.info( "Max FTSFiles/FTSJob = ", str( self.MAX_FILES_PER_JOB ) )
self.MAX_ATTEMPT = self.am_getOption( "MaxTransferAttempts", self.MAX_ATTEMPT )
log.info( "Max transfer attempts = ", str( self.MAX_ATTEMPT ) )
# # thread pool
self.MIN_THREADS = self.am_getOption( "MinThreads", self.MIN_THREADS )
self.MAX_THREADS = self.am_getOption( "MaxThreads", self.MAX_THREADS )
minmax = ( abs( self.MIN_THREADS ), abs( self.MAX_THREADS ) )
self.MIN_THREADS, self.MAX_THREADS = min( minmax ), max( minmax )
log.info( "ThreadPool min threads = ", str( self.MIN_THREADS ) )
log.info( "ThreadPool max threads = ", str( self.MAX_THREADS ) )
self.MAX_REQUESTS = self.am_getOption( "MaxRequests", self.MAX_REQUESTS )
log.info( "Max Requests fetched = ", str( self.MAX_REQUESTS ) )
self.__ftsVersion = Operations().getValue( 'DataManagement/FTSVersion', 'FTS2' )
log.info( "FTSVersion : %s" % self.__ftsVersion )
log.info( "initialize: creation of FTSPlacement..." )
createPlacement = self.resetFTSPlacement()
if not createPlacement["OK"]:
log.error( "initialize: %s" % createPlacement["Message"] )
return createPlacement
    # This sets the default proxy to be used, as defined under
    # /Operations/Shifter/DataManager.
    # The shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
log.info( "will use DataManager proxy" )
self.registrationProtocols = getRegistrationProtocols()
# # gMonitor stuff here
gMonitor.registerActivity( "RequestsAtt", "Attempted requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsOK", "Successful requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsFail", "Failed requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubAtt", "FTSJobs creation attempts",
"FTSAgent", "Created FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubOK", "FTSJobs submitted successfully",
"FTSAgent", "Successful FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubFail", "FTSJobs submissions failed",
"FTSAgent", "Failed FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonAtt", "FTSJobs monitored",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonOK", "FTSJobs monitored successfully",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonFail", "FTSJobs attempts failed",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSMonitorFail", "Failed FTS monitor executions",
"FTSAgent", "Execution/mins", gMonitor.OP_SUM )
pollingTime = self.am_getOption( "PollingTime", 60 )
for status in list( FTSJob.INITSTATES + FTSJob.TRANSSTATES + FTSJob.FAILEDSTATES + FTSJob.FINALSTATES ):
gMonitor.registerActivity( "FTSJobs%s" % status, "FTSJobs %s" % status ,
"FTSAgent", "FTSJobs/cycle", gMonitor.OP_ACUM, pollingTime )
gMonitor.registerActivity( "FtSJobsPerRequest", "Average FTSJobs per request",
"FTSAgent", "FTSJobs/Request", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSFilesPerJob", "FTSFiles per FTSJob",
"FTSAgent", "Number of FTSFiles per FTSJob", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSSizePerJob", "Average FTSFiles size per FTSJob",
"FTSAgent", "Average submitted size per FTSJob", gMonitor.OP_MEAN )
return S_OK()
def finalize( self ):
""" finalize processing """
# log = self.log.getSubLogger( "finalize" )
# if self.__reqCache:
# log.info( 'putting back %d requests from cache' % len( self.__reqCache ) )
# else:
# log.info( 'no requests to put back' )
# for request in self.__reqCache.values():
# put = self.requestClient().putRequest( request )
# if not put["OK"]:
# log.error( "unable to put back request '%s': %s" % ( request.RequestName, put["Message"] ) )
return S_OK()
def execute( self ):
""" one cycle execution """
    # Don't use the server certificate, otherwise the DFC won't let us write
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
log = gLogger.getSubLogger( "execute" )
# # reset FTSPlacement if expired
now = datetime.datetime.now()
if now > self.__ftsPlacementValidStamp:
log.info( "resetting expired FTS placement..." )
resetFTSPlacement = self.resetFTSPlacement()
if not resetFTSPlacement["OK"]:
log.error( "FTSPlacement recreation error: %s" % resetFTSPlacement["Message"] )
return resetFTSPlacement
self.__ftsPlacementValidStamp = now + datetime.timedelta( seconds = self.FTSPLACEMENT_REFRESH )
requestIDs = self.requestClient().getRequestIDsList( statusList = [ "Scheduled" ], limit = self.MAX_REQUESTS )
if not requestIDs["OK"]:
log.error( "unable to read scheduled request ids: %s" % requestIDs["Message"] )
return requestIDs
if not requestIDs["Value"]:
requestIDs = self.__reqCache.keys()
else:
requestIDs = [ req[0] for req in requestIDs["Value"] ]
      requestIDs = list( set( requestIDs + self.__reqCache.keys() ) )
if not requestIDs:
log.info( "no 'Scheduled' requests to process" )
return S_OK()
log.info( "found %s requests to process:" % len( requestIDs ) )
log.info( " => from internal cache: %s" % ( len( self.__reqCache ) ) )
log.info( " => new read from RMS: %s" % ( len( requestIDs ) - len( self.__reqCache ) ) )
for requestID in requestIDs:
request = self.getRequest( requestID )
if not request["OK"]:
log.error( "Error getting request", "%s: %s" % ( requestID, request["Message"] ) )
continue
request = request["Value"]
sTJId = request.RequestID
while True:
queue = self.threadPool().generateJobAndQueueIt( self.processRequest,
args = ( request, ),
sTJId = sTJId )
if queue["OK"]:
log.info( "Request enqueued for execution", sTJId )
gMonitor.addMark( "RequestsAtt", 1 )
break
time.sleep( 1 )
# # process all results
self.threadPool().processAllResults()
return S_OK()
def processRequest( self, request ):
""" process one request
:param Request request: ReqDB.Request
"""
log = self.log.getSubLogger( "req_%s/%s" % ( request.RequestID, request.RequestName ) )
operation = request.getWaiting()
if not operation["OK"]:
log.error( "Unable to find 'Scheduled' ReplicateAndRegister operation in request" )
return self.putRequest( request )
operation = operation["Value"]
if not isinstance( operation, Operation ):
log.error( "Waiting returned operation is not an operation:", type( operation ) )
return self.putRequest( request )
if operation.Type != "ReplicateAndRegister":
log.error( "operation to be executed is not a ReplicateAndRegister but", operation.Type )
return self.putRequest( request )
if operation.Status != "Scheduled":
log.error( "operation in a wrong state, expecting 'Scheduled', got", operation.Status )
return self.putRequest( request )
log.info( 'start processRequest' )
# # select FTSJobs, by default all in TRANS_STATES and INIT_STATES
ftsJobs = self.ftsClient().getFTSJobsForRequest( request.RequestID )
if not ftsJobs["OK"]:
log.error( ftsJobs["Message"] )
return ftsJobs
ftsJobs = [ftsJob for ftsJob in ftsJobs.get( "Value", [] ) if ftsJob.Status not in FTSJob.FINALSTATES]
# # Use a try: finally: for making sure FTS jobs are put back before returning
try:
# # dict keeping info about files to reschedule, submit, fail and register
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
if ftsJobs:
log.info( "==> found %s FTSJobs to monitor" % len( ftsJobs ) )
# # PHASE 0 = monitor active FTSJobs
for ftsJob in ftsJobs:
monitor = self.__monitorJob( request, ftsJob )
if not monitor["OK"]:
log.error( "unable to monitor FTSJob %s: %s" % ( ftsJob.FTSJobID, monitor["Message"] ) )
ftsJob.Status = "Submitted"
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, monitor["Value"] )
log.info( "monitoring of FTSJobs completed" )
for key, ftsFiles in ftsFilesDict.items():
if ftsFiles:
log.verbose( " => %s FTSFiles to %s" % ( len( ftsFiles ), key[2:].lower() ) )
# # PHASE ONE - check ready replicas
missingReplicas = self.__checkReadyReplicas( request, operation )
if not missingReplicas["OK"]:
log.error( missingReplicas["Message"] )
else:
missingReplicas = missingReplicas["Value"]
for opFile in operation:
          # The condition below should actually never be met; log a warning to check whether it happens
if opFile.LFN not in missingReplicas and opFile.Status not in ( 'Done', 'Failed' ):
log.warn( "File should be set Done! %s is replicated at all targets" % opFile.LFN )
opFile.Status = "Done"
if missingReplicas:
# Check if these files are in the FTSDB
ftsFiles = self.ftsClient().getAllFTSFilesForRequest( request.RequestID )
if not ftsFiles['OK']:
log.error( ftsFiles['Message'] )
else:
ftsFiles = ftsFiles['Value']
ftsLfns = set( [ftsFile.LFN for ftsFile in ftsFiles] )
# Recover files not in FTSDB
toSchedule = set( missingReplicas ) - ftsLfns
if toSchedule:
log.warn( '%d files in operation are not in FTSDB, reset them Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# Recover files with target not in FTSDB
toSchedule = set( [missing for missing, missingSEs in missingReplicas.items()
if not [ftsFile for ftsFile in ftsFiles
if ftsFile.LFN == missing and ftsFile.TargetSE in missingSEs]] )
if toSchedule:
log.warn( '%d targets in operation are not in FTSDB, reset files Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# identify missing LFNs that are waiting for a replication which is finished
for ftsFile in [f for f in ftsFiles if f.LFN in missingReplicas and f.Status.startswith( 'Waiting#' )]:
targetSE = ftsFile.Status.split( '#' )[1]
finishedFiles = [f for f in ftsFiles if
f.LFN == ftsFile.LFN and
f.Status == 'Finished' and
f.TargetSE == targetSE and
f not in ftsFilesDict['toUpdate']]
if finishedFiles:
log.warn( "%s is %s while replication was Finished to %s, update" % ( ftsFile.LFN, ftsFile.Status, targetSE ) )
ftsFilesDict['toUpdate'] += finishedFiles
# identify Active transfers for which there is no FTS job any longer and reschedule them
for ftsFile in [f for f in ftsFiles if f.Status == 'Active' and f.TargetSE in missingReplicas.get( f.LFN, [] )]:
if not [ftsJob for ftsJob in ftsJobs if ftsJob.FTSGUID == ftsFile.FTSGUID]:
ftsFilesDict['toReschedule'].append( ftsFile )
# identify Finished transfer for which the replica is still missing
for ftsFile in [f for f in ftsFiles if f.Status == 'Finished' and f.TargetSE in missingReplicas.get( f.LFN, [] ) and f not in ftsFilesDict['toRegister'] ]:
# Check if there is a registration operation for that file and that target
regOp = [op for op in request if
op.Type == 'RegisterReplica' and
op.TargetSE == ftsFile.TargetSE and
[f for f in op if f.LFN == ftsFile.LFN]]
if not regOp:
ftsFilesDict['toReschedule'].append( ftsFile )
# Recover files that are Failed but were not spotted
for ftsFile in [f for f in ftsFiles if f.Status == 'Failed' and f.TargetSE in missingReplicas.get( f.LFN, [] )]:
_r, _s, fail = self.__checkFailed( ftsFile )
if fail:
ftsFilesDict['toFail'].append( ftsFile )
# If all transfers are finished for unregistered files and there is already a registration operation, set it Done
for lfn in missingReplicas:
if not [f for f in ftsFiles if f.LFN == lfn and ( f.Status != 'Finished' or f in ftsFilesDict['toReschedule'] or f in ftsFilesDict['toRegister'] )]:
for opFile in operation:
if opFile.LFN == lfn:
opFile.Status = 'Done'
break
toFail = ftsFilesDict.get( "toFail", [] )
toReschedule = ftsFilesDict.get( "toReschedule", [] )
toSubmit = ftsFilesDict.get( "toSubmit", [] )
toRegister = ftsFilesDict.get( "toRegister", [] )
toUpdate = ftsFilesDict.get( "toUpdate", [] )
# # PHASE TWO = Failed files? -> make request Failed and return
if toFail:
log.error( "==> found %s 'Failed' FTSFiles, but maybe other files can be processed..." % len( toFail ) )
for opFile in operation:
for ftsFile in toFail:
if opFile.FileID == ftsFile.FileID:
opFile.Error = ftsFile.Error
opFile.Status = "Failed"
operation.Error = "%s files are missing any replicas" % len( toFail )
        # # request.Status should be Failed if all files in the operation are "Failed"
if request.Status == "Failed":
request.Error = "ReplicateAndRegister %s failed" % operation.Order
log.error( "request is set to 'Failed'" )
# # putRequest is done by the finally: clause... Not good to do it twice
raise escapeTry
# # PHASE THREE - update Waiting#TargetSE FTSFiles
if toUpdate:
log.info( "==> found %s possible FTSFiles to update..." % ( len( toUpdate ) ) )
byTarget = {}
for ftsFile in toUpdate:
byTarget.setdefault( ftsFile.TargetSE, [] ).append( ftsFile.FileID )
for targetSE, fileIDList in byTarget.items():
update = self.ftsClient().setFTSFilesWaiting( operation.OperationID, targetSE, fileIDList )
if not update["OK"]:
log.error( "update FTSFiles failed:", update["Message"] )
# # PHASE FOUR - add 'RegisterReplica' Operations
if toRegister:
log.info( "==> found %d Files waiting for registration, adding 'RegisterReplica' operations" % len( toRegister ) )
registerFiles = self.__insertRegisterOperation( request, operation, toRegister )
if not registerFiles["OK"]:
log.error( "unable to create 'RegisterReplica' operations:", registerFiles["Message"] )
# if request.Status == "Waiting":
# log.info( "request is in 'Waiting' state, will put it back to RMS" )
# return self.putRequest( request )
# # PHASE FIVE - reschedule operation files
if toReschedule:
log.info( "==> found %s Files to reschedule" % len( toReschedule ) )
rescheduleFiles = self.__reschedule( request, operation, toReschedule )
if not rescheduleFiles["OK"]:
log.error( 'Failed to reschedule files', rescheduleFiles["Message"] )
# # PHASE SIX - read Waiting ftsFiles and submit new FTSJobs. We get also Failed files to recover them if needed
ftsFiles = self.ftsClient().getFTSFilesForRequest( request.RequestID, [ "Waiting", "Failed", 'Submitted', 'Canceled' ] )
if not ftsFiles["OK"]:
log.error( ftsFiles["Message"] )
else:
        retryIds = set( [ ftsFile.FTSFileID for ftsFile in toSubmit ] )
for ftsFile in ftsFiles["Value"]:
if ftsFile.FTSFileID not in retryIds:
if ftsFile.Status in ( 'Failed', 'Canceled' ):
              # If the file did not fail unrecoverably and is not yet in toSubmit, it may be resubmitted
_reschedule, submit, _fail = self.__checkFailed( ftsFile )
elif ftsFile.Status == 'Submitted':
if ftsFile.FTSGUID not in [job.FTSGUID for job in ftsJobs]:
log.warn( 'FTS GUID %s not found in FTS jobs, resubmit file transfer' % ftsFile.FTSGUID )
ftsFile.Status = 'Waiting'
submit = True
else:
submit = False
else:
submit = True
if submit:
toSubmit.append( ftsFile )
retryIds.add( ftsFile.FTSFileID )
# # submit new ftsJobs
if toSubmit:
if request.Status != 'Scheduled':
log.info( "Found %d FTSFiles to submit while request is no longer in Scheduled status (%s)" \
% ( len( toSubmit ), request.Status ) )
else:
self.__checkDuplicates( request.RequestID, toSubmit )
log.info( "==> found %s FTSFiles to submit" % len( toSubmit ) )
submit = self.__submit( request, operation, toSubmit )
if not submit["OK"]:
log.error( submit["Message"] )
else:
ftsJobs += submit["Value"]
# # status change? - put back request
if request.Status != "Scheduled":
log.info( "request no longer in 'Scheduled' state (%s), will put it back to RMS" % request.Status )
except escapeTry:
# This clause is raised when one wants to return from within the try: clause
pass
except Exception, exceptMessage:
log.exception( "Exception in processRequest", lException = exceptMessage )
finally:
putRequest = self.putRequest( request, clearCache = ( request.Status != "Scheduled" ) )
if not putRequest["OK"]:
log.error( "unable to put back request:", putRequest["Message"] )
# # put back jobs in all cases
if ftsJobs:
for ftsJob in list( ftsJobs ):
if not len( ftsJob ):
log.warn( 'FTS job empty, removed: %s' % ftsJob.FTSGUID )
self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
ftsJobs.remove( ftsJob )
putJobs = self.putFTSJobs( ftsJobs )
if not putJobs["OK"]:
log.error( "unable to put back FTSJobs:", putJobs["Message"] )
putRequest = putJobs
# This is where one returns from after execution of the finally: block
return putRequest
def __checkDuplicates( self, reqID, toSubmit ):
""" Check in a list of FTSFiles whether there are duplicates
"""
tupleList = []
log = self.log.getSubLogger( "%s/checkDuplicates" % reqID )
for ftsFile in list( toSubmit ):
fTuple = ( ftsFile.LFN, ftsFile.SourceSE, ftsFile.TargetSE )
if fTuple in tupleList:
log.warn( "Duplicate file to submit, removed:", ', '.join( fTuple ) )
toSubmit.remove( ftsFile )
self.ftsClient().deleteFTSFiles( ftsFile.OperationID, [ftsFile.FileID] )
else:
tupleList.append( fTuple )
def __reschedule( self, request, operation, toReschedule ):
""" reschedule list of :toReschedule: files in request for operation :operation:
:param Request request:
:param Operation operation:
:param list toReschedule: list of FTSFiles
"""
log = self.log.getSubLogger( "req_%s/%s/reschedule" % ( request.RequestID, request.RequestName ) )
ftsFileIDs = [ftsFile.FileID for ftsFile in toReschedule]
for opFile in operation:
if opFile.FileID in ftsFileIDs:
opFile.Status = "Waiting"
toSchedule = []
# # filter files
for opFile in [ opFile for opFile in operation if opFile.Status == "Waiting" ]:
replicas = self.__filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas["NoReplicas"]
badReplicas = replicas['Bad']
if validReplicas:
validTargets = list( set( operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
else:
toSchedule.append( ( opFile.toJSON()["Value"], validReplicas, validTargets ) )
elif noMetaReplicas:
log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
elif noReplicas:
log.warn( "unable to schedule %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Status = 'Failed'
elif badReplicas:
log.warn( "unable to schedule %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Status = 'Failed'
# # do real schedule here
if toSchedule:
log.info( "Rescheduling %d files" % len( toReschedule ) )
ftsSchedule = self.ftsClient().ftsSchedule( request.RequestID,
operation.OperationID,
toSchedule )
if not ftsSchedule["OK"]:
log.error( "Error scheduling files", ftsSchedule["Message"] )
return ftsSchedule
ftsSchedule = ftsSchedule["Value"]
for opFile in operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
opFile.Status = "Scheduled"
elif fileID in ftsSchedule["Failed"]:
opFile.Error = ftsSchedule["Failed"][fileID]
log.error( "Error scheduling file %s" % opFile.LFN, opFile.Error )
return S_OK()
def __submit( self, request, operation, toSubmit ):
""" create and submit new FTSJobs using list of FTSFiles
:param Request request: ReqDB.Request instance
    :param list toSubmit: list of FTSFile instances
:return: [ FTSJob, FTSJob, ...]
"""
log = self.log.getSubLogger( "req_%s/%s/submit" % ( request.RequestID, request.RequestName ) )
    # # group files by (SourceSE, TargetSE) pair
    bySourceAndTarget = {}
    for ftsFile in toSubmit:
      bySourceAndTarget.setdefault( ftsFile.SourceSE, {} ).setdefault( ftsFile.TargetSE, [] ).append( ftsFile )
ftsJobs = []
for source, targetDict in bySourceAndTarget.items():
for target, ftsFileList in targetDict.items():
log.info( "found %s files to submit from %s to %s" % ( len( ftsFileList ), source, target ) )
route = self.__ftsPlacement.findRoute( source, target )
if not route["OK"]:
log.error( route["Message"] )
continue
route = route["Value"]
routeValid = self.__ftsPlacement.isRouteValid( route )
if not routeValid['OK']:
log.error( "Route invalid : %s" % routeValid['Message'] )
continue
sourceSE = StorageElement( source )
sourceToken = sourceSE.getStorageParameters( "SRM2" )
if not sourceToken["OK"]:
log.error( "unable to get sourceSE '%s' parameters: %s" % ( source, sourceToken["Message"] ) )
continue
seStatus = sourceSE.getStatus()['Value']
targetSE = StorageElement( target )
targetToken = targetSE.getStorageParameters( "SRM2" )
if not targetToken["OK"]:
log.error( "unable to get targetSE '%s' parameters: %s" % ( target, targetToken["Message"] ) )
continue
# # create FTSJob
for fileList in breakListIntoChunks( ftsFileList, self.MAX_FILES_PER_JOB ):
ftsJob = FTSJob()
ftsJob.RequestID = request.RequestID
ftsJob.OperationID = operation.OperationID
ftsJob.SourceSE = source
ftsJob.TargetSE = target
ftsJob.SourceToken = sourceToken["Value"].get( "SpaceToken", "" )
ftsJob.TargetToken = targetToken["Value"].get( "SpaceToken", "" )
ftsJob.FTSServer = route.ftsServer
for ftsFile in fileList:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
submit = ftsJob.submitFTS( self.__ftsVersion, command = self.SUBMIT_COMMAND, pinTime = self.PIN_TIME if seStatus['TapeSE'] else 0 )
if not submit["OK"]:
log.error( "unable to submit FTSJob:", submit["Message"] )
continue
log.info( "FTSJob '%s'@'%s' has been submitted" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in ftsJob:
ftsFile.FTSGUID = ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
# # update placement route
try:
self.updateLock().acquire()
self.__ftsPlacement.startTransferOnRoute( route )
finally:
self.updateLock().release()
ftsJobs.append( ftsJob )
log.info( "%s new FTSJobs have been submitted" % len( ftsJobs ) )
return S_OK( ftsJobs )
def __monitorJob( self, request, ftsJob ):
""" execute FTSJob.monitorFTS for a given :ftsJob:
if ftsJob is in a final state, finalize it
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger( "req_%s/%s/monitor/%s" % ( request.RequestID, request.RequestName, ftsJob.FTSGUID ) )
log.info( "FTSJob '%s'@'%s'" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS( self.__ftsVersion , command = self.MONITOR_COMMAND )
if not monitor["OK"]:
gMonitor.addMark( "FTSMonitorFail", 1 )
log.error( monitor["Message"] )
if "getTransferJobSummary2: Not authorised to query request" in monitor["Message"] or \
'was not found' in monitor['Message'] or\
"Not found" in monitor['Message'] or\
'Unknown transfer state' in monitor['Message']:
log.error( "FTSJob not known (expired on server?): delete it" )
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
return S_OK( ftsFilesDict )
return monitor
monitor = monitor["Value"]
log.info( "FTSJob Status = %s Completeness = %s" % ( ftsJob.Status, ftsJob.Completeness ) )
# # monitor status change
gMonitor.addMark( "FTSJobs%s" % ftsJob.Status, 1 )
if ftsJob.Status in FTSJob.FINALSTATES:
finalizeFTSJob = self.__finalizeFTSJob( request, ftsJob )
if not finalizeFTSJob["OK"]:
if 'Unknown transfer state' in finalizeFTSJob['Message']:
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
else:
log.error( finalizeFTSJob["Message"] )
return finalizeFTSJob
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, finalizeFTSJob["Value"] )
return S_OK( ftsFilesDict )
def __finalizeFTSJob( self, request, ftsJob ):
""" finalize FTSJob
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger( "req_%s/%s/monitor/%s/finalize" % ( request.RequestID,
request.RequestName,
ftsJob.FTSJobID ) )
log.info( "finalizing FTSJob %s@%s" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS( self.__ftsVersion, command = self.MONITOR_COMMAND, full = True )
if not monitor["OK"]:
log.error( monitor["Message"] )
return monitor
# # split FTSFiles to different categories
processFiles = self.__filterFiles( ftsJob )
if not processFiles["OK"]:
log.error( processFiles["Message"] )
return processFiles
processFiles = processFiles['Value']
if processFiles['toRegister']:
log.error( "Some files could not be registered in FC:", len( processFiles['toRegister'] ) )
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, processFiles )
# # send accounting record for this job
self.__sendAccounting( ftsJob, request.OwnerDN )
# # update placement - remove this job from placement
route = self.__ftsPlacement.findRoute( ftsJob.SourceSE, ftsJob.TargetSE )
if route["OK"]:
try:
self.updateLock().acquire()
self.__ftsPlacement.finishTransferOnRoute( route['Value'] )
finally:
self.updateLock().release()
log.info( "FTSJob is finalized" )
return S_OK( ftsFilesDict )
def __checkFailed( self, ftsFile ):
reschedule = False
submit = False
fail = False
if ftsFile.Status in ( "Failed", 'Canceled' ):
if ftsFile.Error == "MissingSource":
reschedule = True
else:
if ftsFile.Attempt < self.MAX_ATTEMPT:
submit = True
else:
fail = True
return reschedule, submit, fail
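  # Decision summary for __checkFailed (sketch of the logic above):
  #   Failed/Canceled and Error == "MissingSource"   -> reschedule
  #   Failed/Canceled and Attempt < MAX_ATTEMPT      -> submit again
  #   Failed/Canceled and Attempt >= MAX_ATTEMPT     -> fail permanently
  #   any other status                               -> no action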
def __filterFiles( self, ftsJob ):
""" process ftsFiles from finished ftsJob
:param FTSJob ftsJob: monitored FTSJob instance
"""
# # lists for different categories
toUpdate = []
toReschedule = []
toRegister = []
toSubmit = []
toFail = []
# # loop over files in fts job
for ftsFile in ftsJob:
# # successful files
if ftsFile.Status == "Finished":
if ftsFile.Error == "AddCatalogReplicaFailed":
toRegister.append( ftsFile )
toUpdate.append( ftsFile )
continue
reschedule, submit, fail = self.__checkFailed( ftsFile )
if reschedule:
toReschedule.append( ftsFile )
elif submit:
toSubmit.append( ftsFile )
elif fail:
toFail.append( ftsFile )
return S_OK( { "toUpdate": toUpdate,
"toSubmit": toSubmit,
"toRegister": toRegister,
"toReschedule": toReschedule,
"toFail": toFail } )
def __insertRegisterOperation( self, request, operation, toRegister ):
""" add RegisterReplica operation
:param Request request: request instance
:param Operation transferOp: 'ReplicateAndRegister' operation for this FTSJob
:param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
"""
log = self.log.getSubLogger( "req_%s/%s/registerFiles" % ( request.RequestID, request.RequestName ) )
    byTarget = {}
    for ftsFile in toRegister:
      byTarget.setdefault( ftsFile.TargetSE, [] ).append( ftsFile )
log.info( "will create %s 'RegisterReplica' operations" % len( byTarget ) )
for target, ftsFileList in byTarget.items():
log.info( "creating 'RegisterReplica' operation for targetSE %s with %s files..." % ( target,
len( ftsFileList ) ) )
registerOperation = Operation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
targetSE = StorageElement( target )
for ftsFile in ftsFileList:
opFile = File()
opFile.LFN = ftsFile.LFN
pfn = returnSingleResult( targetSE.getURL( ftsFile.LFN, protocol = self.registrationProtocols ) )
if not pfn["OK"]:
continue
opFile.PFN = pfn["Value"]
registerOperation.addFile( opFile )
request.insertBefore( registerOperation, operation )
return S_OK()
@staticmethod
def __sendAccounting( ftsJob, ownerDN ):
""" prepare and send DataOperation to AccouringDB """
dataOp = DataOperation()
dataOp.setStartTime( fromString( ftsJob.SubmitTime ) )
dataOp.setEndTime( fromString( ftsJob.LastUpdate ) )
accountingDict = dict()
accountingDict["OperationType"] = "ReplicateAndRegister"
username = getUsernameForDN( ownerDN )
if not username["OK"]:
username = ownerDN
else:
username = username["Value"]
accountingDict["User"] = username
accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
accountingDict['ExecutionSite'] = ftsJob.FTSServer
accountingDict['RegistrationTime'] = ftsJob._regTime
accountingDict['RegistrationOK'] = ftsJob._regSuccess
accountingDict['RegistrationTotal'] = ftsJob._regTotal
accountingDict["TransferOK"] = len( [ f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
accountingDict["TransferTotal"] = len( ftsJob )
accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
accountingDict["FinalStatus"] = ftsJob.Status
accountingDict["Source"] = ftsJob.SourceSE
accountingDict["Destination"] = ftsJob.TargetSE
# dt = ftsJob.LastUpdate - ftsJob.SubmitTime
# transferTime = dt.days * 86400 + dt.seconds
# accountingDict["TransferTime"] = transferTime
accountingDict['TransferTime'] = sum( [int( f._duration ) for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
dataOp.setValuesFromDict( accountingDict )
dataOp.commit()
def __checkReadyReplicas( self, request, operation ):
""" check ready replicas for transferOperation """
log = self.log.getSubLogger( "req_%s/%s/checkReadyReplicas" % ( request.RequestID, request.RequestName ) )
targetSESet = set( operation.targetSEList )
# # { LFN: [ targetSE, ... ] }
missingReplicas = {}
scheduledFiles = dict( [ ( opFile.LFN, opFile ) for opFile in operation
if opFile.Status in ( "Scheduled", "Waiting" ) ] )
# # get replicas
replicas = FileCatalog().getReplicas( scheduledFiles.keys() )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
replicas = replicas["Value"]
fullyReplicated = 0
missingSEs = {}
for successfulLFN in replicas["Successful"]:
reps = set( replicas['Successful'][successfulLFN] )
if targetSESet.issubset( reps ):
log.verbose( "%s has been replicated to all targets" % successfulLFN )
fullyReplicated += 1
scheduledFiles[successfulLFN].Status = "Done"
else:
missingReplicas[successfulLFN] = sorted( targetSESet - reps )
ses = ",".join( missingReplicas[ successfulLFN ] )
missingSEs[ses] = missingSEs.setdefault( ses, 0 ) + 1
log.verbose( "%s is still missing at %s" % ( successfulLFN, ses ) )
if fullyReplicated:
log.info( "%d new files have been replicated to all targets" % fullyReplicated )
if missingSEs:
for ses in missingSEs:
log.info( "%d replicas still missing at %s" % ( missingSEs[ses], ses ) )
reMissing = re.compile( "no such file or directory" )
for failedLFN, errStr in replicas["Failed"].items():
scheduledFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
log.error( "%s is missing, setting its status to 'Failed'" % failedLFN )
scheduledFiles[failedLFN].Status = "Failed"
else:
log.warn( "unable to read replicas for %s: %s" % ( failedLFN, errStr ) )
return S_OK( missingReplicas )
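  # Shape of the dictionary returned above (illustrative values; the SE names
  # are hypothetical):
  #   { "/lfn/some/file1": [ "SE-A", "SE-B" ],  # replica still missing at these targets
  #     "/lfn/some/file2": [ "SE-C" ] }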
def __filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import filterReplicas
return filterReplicas( opFile, logger = self.log, dataManager = self.dataManager )
|
vmendez/DIRAC
|
DataManagementSystem/Agent/FTSAgent.py
|
Python
|
gpl-3.0
| 47,236
|
[
"DIRAC"
] |
89cff7b744d89e5a41fce40a118a592388b55994f72fb758d6e1845cf450fcc7
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'vtk_tools_layout.ui'
#
# Created: Fri Mar 10 10:39:40 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_vtk_tools(object):
def setupUi(self, vtk_tools):
vtk_tools.setObjectName("vtk_tools")
vtk_tools.resize(422, 471)
self.verticalLayout_9 = QtGui.QVBoxLayout(vtk_tools)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_rec = QtGui.QLabel(vtk_tools)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_rec.sizePolicy().hasHeightForWidth())
self.label_rec.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_rec.setFont(font)
self.label_rec.setToolTip("")
self.label_rec.setObjectName("label_rec")
self.horizontalLayout.addWidget(self.label_rec)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.radioButton_abs = QtGui.QRadioButton(vtk_tools)
self.radioButton_abs.setChecked(True)
self.radioButton_abs.setObjectName("radioButton_abs")
self.horizontalLayout.addWidget(self.radioButton_abs)
self.radioButton_rel = QtGui.QRadioButton(vtk_tools)
self.radioButton_rel.setObjectName("radioButton_rel")
self.horizontalLayout.addWidget(self.radioButton_rel)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.verticalLayout_6 = QtGui.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label_3 = QtGui.QLabel(vtk_tools)
self.label_3.setObjectName("label_3")
self.verticalLayout_6.addWidget(self.label_3)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lineEdit_vg = QtGui.QLineEdit(vtk_tools)
self.lineEdit_vg.setObjectName("lineEdit_vg")
self.horizontalLayout_2.addWidget(self.lineEdit_vg)
self.pushButton_vg = QtGui.QPushButton(vtk_tools)
self.pushButton_vg.setObjectName("pushButton_vg")
self.horizontalLayout_2.addWidget(self.pushButton_vg)
self.verticalLayout_6.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addLayout(self.verticalLayout_6)
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.label_4 = QtGui.QLabel(vtk_tools)
self.label_4.setObjectName("label_4")
self.verticalLayout_7.addWidget(self.label_4)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.lineEdit_vgref = QtGui.QLineEdit(vtk_tools)
self.lineEdit_vgref.setEnabled(False)
self.lineEdit_vgref.setObjectName("lineEdit_vgref")
self.horizontalLayout_4.addWidget(self.lineEdit_vgref)
self.pushButton_vgref = QtGui.QPushButton(vtk_tools)
self.pushButton_vgref.setEnabled(False)
self.pushButton_vgref.setObjectName("pushButton_vgref")
self.horizontalLayout_4.addWidget(self.pushButton_vgref)
self.verticalLayout_7.addLayout(self.horizontalLayout_4)
self.verticalLayout_8 = QtGui.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.verticalLayout_7.addLayout(self.verticalLayout_8)
self.label_5 = QtGui.QLabel(vtk_tools)
self.label_5.setObjectName("label_5")
self.verticalLayout_7.addWidget(self.label_5)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.lineEdit_vgout = QtGui.QLineEdit(vtk_tools)
self.lineEdit_vgout.setText("")
self.lineEdit_vgout.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit_vgout.setObjectName("lineEdit_vgout")
self.horizontalLayout_3.addWidget(self.lineEdit_vgout)
self.pushButton_vtkout = QtGui.QPushButton(vtk_tools)
self.pushButton_vtkout.setObjectName("pushButton_vtkout")
self.horizontalLayout_3.addWidget(self.pushButton_vtkout)
self.pushButton_parav = QtGui.QPushButton(vtk_tools)
self.pushButton_parav.setObjectName("pushButton_parav")
self.horizontalLayout_3.addWidget(self.pushButton_parav)
self.verticalLayout_7.addLayout(self.horizontalLayout_3)
self.start_vg = QtGui.QPushButton(vtk_tools)
self.start_vg.setEnabled(False)
self.start_vg.setObjectName("start_vg")
self.verticalLayout_7.addWidget(self.start_vg)
self.verticalLayout_4.addLayout(self.verticalLayout_7)
self.verticalLayout.addLayout(self.verticalLayout_4)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.line = QtGui.QFrame(vtk_tools)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.label_src = QtGui.QLabel(vtk_tools)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_src.sizePolicy().hasHeightForWidth())
self.label_src.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_src.setFont(font)
self.label_src.setObjectName("label_src")
self.verticalLayout_2.addWidget(self.label_src)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label = QtGui.QLabel(vtk_tools)
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.label)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.lineEdit_rays = QtGui.QLineEdit(vtk_tools)
self.lineEdit_rays.setObjectName("lineEdit_rays")
self.horizontalLayout_6.addWidget(self.lineEdit_rays)
self.pushButton_rays = QtGui.QPushButton(vtk_tools)
self.pushButton_rays.setObjectName("pushButton_rays")
self.horizontalLayout_6.addWidget(self.pushButton_rays)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.verticalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_2 = QtGui.QLabel(vtk_tools)
self.label_2.setObjectName("label_2")
self.verticalLayout_5.addWidget(self.label_2)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.lineEdit_raysout = QtGui.QLineEdit(vtk_tools)
self.lineEdit_raysout.setObjectName("lineEdit_raysout")
self.horizontalLayout_5.addWidget(self.lineEdit_raysout)
self.pushButton_raysout = QtGui.QPushButton(vtk_tools)
self.pushButton_raysout.setObjectName("pushButton_raysout")
self.horizontalLayout_5.addWidget(self.pushButton_raysout)
self.verticalLayout_5.addLayout(self.horizontalLayout_5)
self.start_rays = QtGui.QPushButton(vtk_tools)
self.start_rays.setEnabled(False)
self.start_rays.setObjectName("start_rays")
self.verticalLayout_5.addWidget(self.start_rays)
self.verticalLayout_2.addLayout(self.verticalLayout_5)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout_9.addLayout(self.verticalLayout)
self.buttonBox = QtGui.QDialogButtonBox(vtk_tools)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout_9.addWidget(self.buttonBox)
self.retranslateUi(vtk_tools)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), vtk_tools.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), vtk_tools.reject)
QtCore.QMetaObject.connectSlotsByName(vtk_tools)
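    # Usage sketch (the standard pattern for pyside-uic generated classes; the
    # application code below is an assumption, not part of this file):
    #
    #     app = QtGui.QApplication([])
    #     dialog = QtGui.QDialog()
    #     ui = Ui_vtk_tools()
    #     ui.setupUi(dialog)
    #     dialog.show()
    #     app.exec_()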
def retranslateUi(self, vtk_tools):
vtk_tools.setWindowTitle(QtGui.QApplication.translate("vtk_tools", "VTK tools", None, QtGui.QApplication.UnicodeUTF8))
self.label_rec.setText(QtGui.QApplication.translate("vtk_tools", "Velocity grid to VTK", None, QtGui.QApplication.UnicodeUTF8))
self.radioButton_abs.setToolTip(QtGui.QApplication.translate("vtk_tools", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">Create vtk velocity model with absolute velocities. </span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">Inputfile: </span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">\'vgrids.in\' of the corresponding iteration step (e.g. iteration step 1: </span><span style=\" font-family:\'Sans\'; font-size:10pt; font-style:italic;\">$simulation_dir/it1/vgrids.in</span><span style=\" font-family:\'Sans\'; font-size:10pt;\">)</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.radioButton_abs.setText(QtGui.QApplication.translate("vtk_tools", "absolute [?]", None, QtGui.QApplication.UnicodeUTF8))
self.radioButton_rel.setToolTip(QtGui.QApplication.translate("vtk_tools", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">Create vtk velocity model with relative velocities. </span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">Inputfiles: </span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">\'vgrids.in\' of the corresponding iteration step (e.g. iteration step 1: </span><span style=\" font-family:\'Sans\'; font-size:10pt; font-style:italic;\">$simulation_dir/it1/vgrids.in</span><span style=\" font-family:\'Sans\'; font-size:10pt;\">)</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">\'vgridsref.in\' (</span><span style=\" font-family:\'Sans\'; font-size:10pt; font-style:italic;\">$simulation_dir/vgridsref.in</span><span style=\" font-family:\'Sans\'; font-size:10pt;\">)</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.radioButton_rel.setText(QtGui.QApplication.translate("vtk_tools", "relative [?]", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("vtk_tools", "Browse for velocity grid file (\'vgrids.in\'):", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_vg.setText(QtGui.QApplication.translate("vtk_tools", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("vtk_tools", "Browse for reference velocity grid file (\'vgridsref.in\'):", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_vgref.setText(QtGui.QApplication.translate("vtk_tools", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("vtk_tools", "Output Filename:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_vtkout.setText(QtGui.QApplication.translate("vtk_tools", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_parav.setToolTip(QtGui.QApplication.translate("vtk_tools", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Call Paraview (Shell command: <span style=\" font-style:italic;\">paraview</span>) for the specified output filename in the current directory.</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_parav.setText(QtGui.QApplication.translate("vtk_tools", "<- Paraview", None, QtGui.QApplication.UnicodeUTF8))
self.start_vg.setText(QtGui.QApplication.translate("vtk_tools", "Start", None, QtGui.QApplication.UnicodeUTF8))
self.label_src.setToolTip(QtGui.QApplication.translate("vtk_tools", "Create VTK files from the FMTOMO output file \'rays.dat\'.\n"
"This will generate one file of ray paths for each individual source.", None, QtGui.QApplication.UnicodeUTF8))
self.label_src.setText(QtGui.QApplication.translate("vtk_tools", "Rays to VTK [?]", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("vtk_tools", "Browse for input file (\'rays.dat\'):", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_rays.setText(QtGui.QApplication.translate("vtk_tools", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("vtk_tools", "Specify output directory:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_raysout.setText(QtGui.QApplication.translate("vtk_tools", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.start_rays.setText(QtGui.QApplication.translate("vtk_tools", "Start", None, QtGui.QApplication.UnicodeUTF8))
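# Illustrative usage sketch (the Ui class name 'Ui_vtk_tools' is assumed from
# the 'vtk_tools' object name; the class is defined earlier in this generated file):
#
#     app = QtGui.QApplication([])
#     dialog = QtGui.QDialog()
#     ui = Ui_vtk_tools()
#     ui.setupUi(dialog)
#     dialog.show()
#     app.exec_()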
|
seismology-RUB/ASP3D
|
asp3d/gui/layouts/vtk_tools_layout.py
|
Python
|
gpl-3.0
| 16,446
|
[
"ParaView",
"VTK"
] |
e88255c00b85051d251c135608519521f8f2d58f5b825e1a95886d065ce966e8
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Sales Pipeline"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Lead",
"description": _("Database of potential customers."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Opportunity",
"description": _("Potential opportunities for selling."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Contact",
"description": _("All Contacts."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Communication",
"description": _("Record of all communications of type email, phone, chat, visit, etc."),
},
{
"type": "doctype",
"name": "Lead Source",
"description": _("Track Leads by Lead Source.")
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Lead Details",
"doctype": "Lead",
"onboard": 1,
},
{
"type": "page",
"name": "sales-funnel",
"label": _("Sales Funnel"),
"icon": "fa fa-bar-chart",
"onboard": 1,
},
{
"type": "report",
"name": "Prospects Engaged But Not Converted",
"doctype": "Lead",
"is_query_report": True,
"onboard": 1,
},
{
"type": "report",
"name": "Minutes to First Response for Opportunity",
"doctype": "Opportunity",
"is_query_report": True,
"dependencies": ["Opportunity"]
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Addresses And Contacts",
"doctype": "Contact",
"dependencies": ["Customer"]
},
{
"type": "report",
"is_query_report": True,
"name": "Inactive Customers",
"doctype": "Sales Order",
"dependencies": ["Sales Order"]
},
{
"type": "report",
"is_query_report": True,
"name": "Campaign Efficiency",
"doctype": "Lead",
"dependencies": ["Lead"]
},
{
"type": "report",
"is_query_report": True,
"name": "Lead Owner Efficiency",
"doctype": "Lead",
"dependencies": ["Lead"]
}
]
},
{
"label": _("Settings"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"label": _("Customer Group"),
"name": "Customer Group",
"icon": "fa fa-sitemap",
"link": "Tree/Customer Group",
"description": _("Manage Customer Group Tree."),
"onboard": 1,
},
{
"type": "doctype",
"label": _("Territory"),
"name": "Territory",
"icon": "fa fa-sitemap",
"link": "Tree/Territory",
"description": _("Manage Territory Tree."),
"onboard": 1,
},
{
"type": "doctype",
"label": _("Sales Person"),
"name": "Sales Person",
"icon": "fa fa-sitemap",
"link": "Tree/Sales Person",
"description": _("Manage Sales Person Tree."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Campaign",
"description": _("Sales campaigns."),
},
{
"type": "doctype",
"name": "SMS Center",
"description":_("Send mass SMS to your contacts"),
},
{
"type": "doctype",
"name": "SMS Log",
"description":_("Logs for maintaining sms delivery status"),
},
{
"type": "doctype",
"name": "SMS Settings",
"description": _("Setup SMS gateway settings")
}
]
},
{
"label": _("Maintenance"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Maintenance Schedule",
"description": _("Plan for maintenance visits."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Maintenance Visit",
"description": _("Visit report for maintenance call."),
},
{
"type": "report",
"name": "Maintenance Schedules",
"is_query_report": True,
"doctype": "Maintenance Schedule"
},
{
"type": "doctype",
"name": "Warranty Claim",
"description": _("Warranty Claim against Serial No."),
},
]
},
# {
# "label": _("Help"),
# "items": [
# {
# "type": "help",
# "label": _("Lead to Quotation"),
# "youtube_id": "TxYX4r4JAKA"
# },
# {
# "type": "help",
# "label": _("Newsletters"),
# "youtube_id": "muLKsCrrDRo"
# },
# ]
# },
]
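# Illustrative note: frappe builds the CRM module page from the sections
# returned above. A new entry is added by appending another item dict to a
# section's "items" list, e.g. (hypothetical doctype):
#
#     {
#         "type": "doctype",
#         "name": "Prospect",
#         "description": _("Prospective customers."),
#     }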
|
brownharryb/erpnext
|
erpnext/config/crm.py
|
Python
|
gpl-3.0
| 4,525
|
[
"VisIt"
] |
d93e6b212ad14793424dd5f1c68e09711bc6419f6508cce19284afabaecfd439
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import shlex
from distutils.version import LooseVersion
from subprocess import Popen
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def __init__(self, *args, **kwargs):
"""Add extra config options specific to Quantum ESPRESSO."""
super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
self.build_in_installdir = True
if LooseVersion(self.version) >= LooseVersion("6"):
self.install_subdir = "qe-%s" % self.version
else:
self.install_subdir = "espresso-%s" % self.version
def patch_step(self):
"""Patch files from build dir (not start dir)."""
super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
# also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
env.setvar('FCCPP', "%s -E" % os.getenv('CC'))
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
if self.toolchain.options.get('openmp', False):
libfft = os.getenv('LIBFFT_MT')
else:
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
# always include -w to suppress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
if self.toolchain.options.get('openmp', False):
val = os.getenv('LIB%s_MT' % lib)
else:
val = os.getenv('LIB%s' % lib)
# guard against unset environment variables, so 'None' never ends up in make.sys
if val is None:
val = ''
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
if LooseVersion(self.version) >= LooseVersion("6"):
make_ext = '.inc'
else:
make_ext = '.sys'
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make' + make_ext)
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub(r"\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90' + make_ext)
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
wantdir = os.path.join(self.builddir, wantdirs[0])
make_sys_in_path = None
cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
for path in cand_paths:
full_path = os.path.join(wantdir, path)
if os.path.exists(full_path):
make_sys_in_path = full_path
break
if make_sys_in_path is None:
raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
wantdir, ', '.join(cand_paths))
try:
for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", make_sys_in_path, err)
# move non-espresso directories to where they're expected and create symlinks
try:
dirnames = [d for d in os.listdir(self.builddir) if not d == self.install_subdir]
targetdir = os.path.join(self.builddir, self.install_subdir)
for dirname in dirnames:
shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
self.log.info("Moved %s into %s" % (dirname, targetdir))
dirname_head = dirname.split('-')[0]
# Handle the case where the directory is preceded by 'qe-'
if dirname_head == 'qe':
dirname_head = dirname.split('-')[1]
linkname = None
if dirname_head == 'sax':
linkname = 'SaX'
if dirname_head == 'wannier90':
linkname = 'W90'
elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
linkname = dirname_head.upper()
if linkname:
os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
except OSError as err:
raise EasyBuildError("Failed to move non-espresso directories: %s", err)
def build_step(self):
self.log.info("Started build step")#. saving current directory")
#cwd = os.getcwd()
#self.log.info("Current directory is "+cwd+". Getting target directory")
self.log.info("Getting target directory")
targetdir = os.path.join(self.builddir, self.install_subdir)
self.log.info("fixing directory structure")
wrongdir = os.path.join(targetdir,"q-e-qe-6.2.1")
for filename in os.listdir(wrongdir):
shutil.move(os.path.join(wrongdir,filename),os.path.join(targetdir,filename))
os.rmdir(wrongdir)
self.log.info("Switching to target directory "+targetdir)
os.chdir(targetdir)
args = shlex.split("make " + self.cfg["buildopts"])
sys.stdout.write("== running " + str(args) + " in directory " + targetdir + "\n")
self.log.info("Calling make with options in config buildopts")
p = Popen(args)
p.communicate()
if p.returncode != 0:
raise EasyBuildError("Build failed with exit code %s", p.returncode)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for Quantum ESPRESSO."""
# build list of expected binaries based on make targets
bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
if 'cp' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["cp.x", "cppp.x", "wfdd.x"])
if 'gww' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
if 'ld1' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["ld1.x"])
if 'gipaw' in self.cfg['buildopts']:
bins.extend(["gipaw.x"])
if 'neb' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["neb.x", "path_interpolation.x"])
if 'ph' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
if LooseVersion(self.version) < LooseVersion("6"):
bins.extend(["d3.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["fqha.x", "q2qstar.x"])
if 'pp' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
"plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
"projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
"wannier_ham.x", "wannier_plot.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["pw2bgw.x", "bgw2pw.x"])
else:
bins.extend(["pw2casino.x"])
if 'pw' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["generate_vdW_kernel_table.x"])
else:
bins.extend(["path_int.x"])
if LooseVersion(self.version) < LooseVersion("5.3.0"):
bins.extend(["band_plot.x", "bands_FS.x", "kvecs_FS.x"])
if 'pwcond' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["pwcond.x"])
if 'tddfpt' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
upftools = []
if 'upf' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
"oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
"virtual.x"]
if LooseVersion(self.version) > LooseVersion("5"):
upftools.extend(["interpolate.x", "upf2casino.x"])
if 'vdw' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["vdw.x"])
if 'w90' in self.cfg['buildopts']:
bins.extend(["wannier90.x"])
want_bins = []
if 'want' in self.cfg['buildopts']:
want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
"dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
"wannier.x", "wfk2etsf.x"]
if LooseVersion(self.version) > LooseVersion("5"):
want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
if 'xspectra' in self.cfg['buildopts']:
bins.extend(["xspectra.x"])
yambo_bins = []
if 'yambo' in self.cfg['buildopts']:
yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
pref = self.install_subdir
custom_paths = {
'files': [os.path.join(pref, 'bin', x) for x in bins] +
[os.path.join(pref, 'upftools', x) for x in upftools] +
[os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
[os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
'dirs': [os.path.join(pref, 'include')]
}
super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom path suggestions for Quantum ESPRESSO."""
guesses = super(EB_QuantumESPRESSO, self).make_module_req_guess()
# order matters here, 'bin' should be *last* in this list to ensure it gets prepended to $PATH last,
# so it gets preference over the others
# this is important since some binaries are available in two places (e.g. dos.x in both bin and WANT/bin)
bindirs = ['upftools', 'WANT/bin', 'YAMBO/bin', 'bin']
guesses.update({
'PATH': [os.path.join(self.install_subdir, bindir) for bindir in bindirs],
'CPATH': [os.path.join(self.install_subdir, 'include')],
})
return guesses
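# Illustrative easyconfig fragment (toolchain and version are assumptions)
# exercising the custom parameters defined in extra_options() above:
#
#     name = 'QuantumESPRESSO'
#     version = '6.2.1'
#     toolchain = {'name': 'intel', 'version': '2017b'}
#     hybrid = True            # adds --enable-openmp and the OpenMP flags
#     with_scalapack = False   # adds --without-scalapack, drops -D__SCALAPACK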
|
qldhpc/eb_local
|
eb_blocks/q/quantumespresso.py
|
Python
|
apache-2.0
| 17,565
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90",
"Yambo"
] |
79fb572dd8a8235f01d7bf6a29393ca341997fa04ee5e61981981e736149b8f3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 11:20:57 2019
@author: butenko
"""
import os
import fileinput
def paste_to_hoc_python3(axonnodes, axoninter, axontotal, v_init, steps_per_ms):
"""Patch the NEURON .hoc files in place with the given axon and solver parameters."""
NNODES_line = "NNODES ="
NNODES_input = "NNODES = {}\n".format(axonnodes)
axonnodes_line = "axonnodes="
axonnodes_input = "axonnodes={}\n".format(axonnodes)
nv_init_line = "v_init="
nv_init_input = "v_init={}\n".format(v_init)  # normally -80 mV
steps_per_ms_line = "steps_per_ms="
steps_per_ms_input = "steps_per_ms={}\n".format(steps_per_ms)
x = fileinput.input(files="init_B5_extracellular.hoc", inplace=1)
for line in x:
if line.startswith(axonnodes_line):
line = axonnodes_input
if line.startswith(nv_init_line):
line = nv_init_input
if line.startswith(steps_per_ms_line):
line = steps_per_ms_input
print(line,end="")
x.close()
x = fileinput.input(files="axon5.hoc", inplace=1)
for line in x:
if line.startswith(NNODES_line):
line = NNODES_input
print(line,end="")
x.close()
return True
def paste_paraview_vis_python3(Points_on_model, N_comp_in_between):
"""Patch the Paraview visualization script with the axon discretization parameters."""
NPoints_line = "Points_on_model"
NPoints_input = "Points_on_model={}\n".format(Points_on_model)
N_comp_in_between_line = "N_comp_in_between"
N_comp_in_between_input = "N_comp_in_between={}\n".format(N_comp_in_between)
fl = fileinput.input(files="Visualization_files/Paraview_vis_axon.py", inplace=1)
for line in fl:
if line.startswith(NPoints_line):
line = NPoints_input
if line.startswith(N_comp_in_between_line):
line = N_comp_in_between_input
print(line,end="")
fl.close()
return True
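# Illustrative call (assumes the .hoc files referenced above sit in the
# current working directory; parameter values are examples only):
#
#     paste_to_hoc_python3(axonnodes=21, axoninter=20, axontotal=41,
#                          v_init=-80.0, steps_per_ms=10)
#     paste_paraview_vis_python3(Points_on_model=100, N_comp_in_between=2)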
|
andreashorn/lead_dbs
|
ext_libs/OSS-DBS/OSS_platform/Axon_files/Reilly2016/Parameter_insertion_python3.py
|
Python
|
gpl-3.0
| 2,066
|
[
"NEURON"
] |
a27a87c11157bc0c83c4ad38f87ecd163931b4518d41ca5dd39431482e123273
|
from __future__ import division, print_function
import warnings
import numpy as np
from scipy.special import gammainccinv
from scipy.ndimage.filters import convolve
def _inv_nchi_cdf(N, K, alpha):
"""Inverse CDF for the noncentral chi distribution
See [1]_ p.3 section 2.3"""
return gammainccinv(N * K, 1 - alpha) / K
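# Example: with N = 1 coil, K = 100 samples and alpha = 0.01, the
# identification interval used by piesno() below is bracketed by
# _inv_nchi_cdf(1, 100, 0.005) and _inv_nchi_cdf(1, 100, 0.995).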
def piesno(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5, return_mask=False):
"""
Probabilistic Identification and Estimation of Noise (PIESNO).
Parameters
-----------
data : ndarray
The magnitude signals to analyse. The last dimension must contain the
same realisation of the volume, such as dMRI or fMRI data.
N : int
The number of phase array coils of the MRI scanner.
If your scanner does a SENSE reconstruction, ALWAYS use N=1, as the noise
profile is always Rician.
If your scanner does a GRAPPA reconstruction, set N as the number
of phase array coils.
alpha : float
Probabilistic estimation threshold for the gamma function.
l : int
number of initial estimates for sigma to try.
itermax : int
Maximum number of iterations to execute if convergence
is not reached.
eps : float
Tolerance for the convergence criterion. Convergence is
reached if two subsequent estimates are smaller than eps.
return_mask : bool
If True, return a mask identifying all the pure noise voxels
that were found.
Returns
--------
sigma : float
The estimated standard deviation of the gaussian noise.
mask : ndarray (optional)
A boolean mask indicating the voxels identified as pure noise.
Notes
-----
This function assumes two things : 1. The data has a noisy, non-masked
background and 2. The data is a repetition of the same measurements
along the last axis, i.e. dMRI or fMRI data, not structural data like T1/T2.
This function processes the data slice by slice, as originally designed in
the paper. Use it to get a slice by slice estimation of the noise, as in
spinal cord imaging for example.
References
------------
.. [1] Koay CG, Ozarslan E and Pierpaoli C.
"Probabilistic Identification and Estimation of Noise (PIESNO):
A self-consistent approach and its applications in MRI."
Journal of Magnetic Resonance 2009; 199: 94-103.
.. [2] Koay CG, Ozarslan E and Basser PJ.
"A signal transformational framework for breaking the noise floor
and its applications in MRI."
Journal of Magnetic Resonance 2009; 197: 108-119.
"""
# This method works on a 2D array with repetitions as the third dimension,
# so process the dataset slice by slice.
if data.ndim < 3:
e_s = "This function only works on datasets of at least 3 dimensions."
raise ValueError(e_s)
if data.ndim == 4:
sigma = np.zeros(data.shape[-2], dtype=np.float32)
mask_noise = np.zeros(data.shape[:-1], dtype=bool)
for idx in range(data.shape[-2]):
sigma[idx], mask_noise[..., idx] = _piesno_3D(data[..., idx, :],
N, alpha=alpha,
l=l, itermax=itermax,
eps=eps, return_mask=True)
else:
sigma, mask_noise = _piesno_3D(data, N,
alpha=alpha,
l=l,
itermax=itermax,
eps=eps,
return_mask=True)
if return_mask:
return sigma, mask_noise
return sigma
def _piesno_3D(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5,
return_mask=False):
"""
Probabilistic Identification and Estimation of Noise (PIESNO).
This is the slice by slice version for working on a 4D array.
Parameters
-----------
data : ndarray
The magnitude signals to analyse. The last dimension must contain the
same realisation of the volume, such as dMRI or fMRI data.
N : int
The number of phase array coils of the MRI scanner.
alpha : float (optional)
Probabilistic estimation threshold for the gamma function.
Default: 0.01.
l : int (optional)
number of initial estimates for sigma to try. Default: 100.
itermax : int (optional)
Maximum number of iterations to execute if convergence
is not reached. Default: 100.
eps : float (optional)
Tolerance for the convergence criterion. Convergence is
reached if two subsequent estimates are smaller than eps.
Default: 1e-5.
return_mask : bool (optional)
If True, return a mask identifying all the pure noise voxels
that were found. Default: False.
Returns
--------
sigma : float
The estimated standard deviation of the gaussian noise.
mask : ndarray
A boolean mask indicating the voxels identified as pure noise.
Notes
------
This function assumes two things : 1. The data has a noisy, non-masked
background and 2. The data is a repetition of the same measurements
along the last axis, i.e. dMRI or fMRI data, not structural data like T1/T2.
References
------------
.. [1] Koay CG, Ozarslan E and Pierpaoli C.
"Probabilistic Identification and Estimation of Noise (PIESNO):
A self-consistent approach and its applications in MRI."
Journal of Magnetic Resonance 2009; 199: 94-103.
.. [2] Koay CG, Ozarslan E and Basser PJ.
"A signal transformational framework for breaking the noise floor
and its applications in MRI."
Journal of Magnetic Resonance 2009; 197: 108-119.
"""
# Get optimal quantile for N if available, else use the median.
opt_quantile = {1: 0.79681213002002,
2: 0.7306303027491917,
4: 0.6721952960782169,
8: 0.6254030432343569,
16: 0.5900487123737876,
32: 0.5641772300866416,
64: 0.5455611840489607,
128: 0.5322811923303339}
if N in opt_quantile:
q = opt_quantile[N]
else:
q = 0.5
# Initial estimation of sigma
denom = np.sqrt(2 * _inv_nchi_cdf(N, 1, q))
m = np.percentile(data, q * 100) / denom
phi = np.arange(1, l + 1) * m / l
K = data.shape[-1]
sum_m2 = np.sum(data**2, axis=-1, dtype=np.float32)
sigma = np.zeros(phi.shape, dtype=phi.dtype)
mask = np.zeros(phi.shape + data.shape[:-1])
lambda_minus = _inv_nchi_cdf(N, K, alpha/2)
lambda_plus = _inv_nchi_cdf(N, K, 1 - alpha/2)
pos = 0
max_length_omega = 0
for num, sig in enumerate(phi):
sig_prev = 0
omega_size = 1
idx = np.zeros(sum_m2.shape, dtype=bool)
for n in range(itermax):
if np.abs(sig - sig_prev) < eps:
break
s = sum_m2 / (2 * K * sig**2)
idx = np.logical_and(lambda_minus <= s, s <= lambda_plus)
omega = data[idx, :]
# If no point meets the criterion, exit
if omega.size == 0:
omega_size = 0
break
sig_prev = sig
# Numpy percentile must range in 0 to 100, hence q*100
sig = np.percentile(omega, q * 100) / denom
omega_size = omega.size / K
# Remember the biggest omega array as giving the optimal
# sigma amongst all initial estimates from phi
if omega_size > max_length_omega:
pos, max_length_omega = num, omega_size
sigma[num] = sig
mask[num] = idx
if return_mask:
return sigma[pos], mask[pos]
return sigma[pos]
def estimate_sigma(arr, disable_background_masking=False, N=1):
"""Standard deviation estimation from local patches
Parameters
----------
arr : 3D or 4D ndarray
The array to be estimated
disable_background_masking : bool, default False
If True, uses all voxels for the estimation, otherwise, only non-zeros
voxels are used. Useful if the background is masked by the scanner.
N : int, default 1
Number of coils of the receiver array. Use N = 1 in case of a SENSE
reconstruction (Philips scanners) or the number of coils for a GRAPPA
reconstruction (Siemens and GE). See [1] for more information.
Returns
-------
sigma : ndarray
standard deviation of the noise, one estimation per volume.
Notes
-----
This function is the same as manually taking the standard deviation of the
background and gives one value for the whole 3D array.
It also includes the coil-dependent correction factor of Koay 2006
(see [1]_, equation 18) with theta = 0.
Since this function was introduced in [2]_ for T1 imaging, it is expected
to perform reasonably well on diffusion MRI data, but it might oversmooth
some regions and leave others under-denoised when the noise profile varies
spatially. Consider using :func:`piesno` to estimate sigma instead if
visual inaccuracies are apparent in the denoised result.
References
----------
.. [1] Koay, C. G., & Basser, P. J. (2006). Analytically exact correction
scheme for signal extraction from noisy magnitude MR signals.
Journal of Magnetic Resonance), 179(2), 317-22.
.. [2] Coupe, P., Yger, P., Prima, S., Hellier, P., Kervrann, C., Barillot, C., 2008.
An optimized blockwise nonlocal means denoising filter for 3-D magnetic
resonance images, IEEE Trans. Med. Imaging 27, 425-41.
"""
k = np.zeros((3, 3, 3), dtype=np.int8)
k[0, 1, 1] = 1
k[2, 1, 1] = 1
k[1, 0, 1] = 1
k[1, 2, 1] = 1
k[1, 1, 0] = 1
k[1, 1, 2] = 1
# Precomputed factor from Koay 2006, this corrects the bias of magnitude image
correction_factor = {1: 0.42920367320510366,
4: 0.4834941393603609,
6: 0.4891759468548269,
8: 0.49195420135894175,
12: 0.4946862482541263,
16: 0.4960339908122364,
20: 0.4968365823718557,
24: 0.49736907650825657,
32: 0.49803177052530145,
64: 0.49901964176235936}
if N in correction_factor:
factor = correction_factor[N]
else:
raise ValueError("N = {0} is not supported! Please choose amongst \
{1}".format(N, sorted(list(correction_factor.keys()))))
if arr.ndim == 3:
sigma = np.zeros(1, dtype=np.float32)
arr = arr[..., None]
elif arr.ndim == 4:
sigma = np.zeros(arr.shape[-1], dtype=np.float32)
else:
raise ValueError("Array shape is not supported!", arr.shape)
if disable_background_masking:
# use all voxels for the estimation, as documented above
mask = np.ones_like(arr[..., 0], dtype=bool)
else:
# restrict the estimation to the non-zero voxels of the first volume
mask = arr[..., 0].astype(bool)
conv_out = np.zeros(arr[..., 0].shape, dtype=np.float64)
for i in range(sigma.size):
convolve(arr[..., i], k, output=conv_out)
mean_block = np.sqrt(6/7) * (arr[..., i] - 1/6 * conv_out)
sigma[i] = np.sqrt(np.mean(mean_block[mask]**2) / factor)
return sigma
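# Illustrative usage sketch (synthetic data, single-coil Rician profile):
#
#     >>> import numpy as np
#     >>> vol = np.abs(np.random.randn(20, 20, 20, 5)).astype(np.float32)
#     >>> sigma = estimate_sigma(vol, N=1)   # one estimate per volume
#     >>> sigma.shape
#     (5,)
#     >>> sig, mask = piesno(vol, N=1, return_mask=True)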
|
oesteban/dipy
|
dipy/denoise/noise_estimate.py
|
Python
|
bsd-3-clause
| 11,425
|
[
"Gaussian"
] |
4efebec49a476f5de1510370a8c9850a3598035d2cbb30adb78bc9eb47a46fc4
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf import gto, scf, tddft
from pyscf.data.nist import HARTREE2EV
from pyscf.nao import bse_iter
from pyscf.nao.m_polariz_inter_ave import polariz_freq_osc_strength
class KnowValues(unittest.TestCase):
def test_159_bse_h2b_uhf_cis(self):
""" This """
mol = gto.M(verbose=1,atom='B 0 0 0; H 0 0.489 1.074; H 0 0.489 -1.074',basis='cc-pvdz',spin=3)
gto_mf = scf.UHF(mol)
gto_mf.kernel()
gto_td = tddft.TDHF(gto_mf)
gto_td.nstates = 150
gto_td.kernel()
omegas = np.arange(0.0, 2.0, 0.01) + 1j*0.03
p_ave = -polariz_freq_osc_strength(gto_td.e, gto_td.oscillator_strength(), omegas).imag
data = np.array([omegas.real*HARTREE2EV, p_ave])
np.savetxt('test_0159_bse_h2b_uhf_cis_pyscf.txt', data.T, fmt=['%f','%f'])
#data_ref = np.loadtxt('test_0159_bse_h2b_uhf_cis_pyscf.txt-ref').T
#self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
nao_td = bse_iter(mf=gto_mf, gto=mol, verbosity=0, xc_code='CIS')
polariz = -nao_td.comp_polariz_inter_ave(omegas).imag
data = np.array([omegas.real*HARTREE2EV, polariz])
np.savetxt('test_0159_bse_h2b_uhf_cis_nao.txt', data.T, fmt=['%f','%f'])
data_ref = np.loadtxt('test_0159_bse_h2b_uhf_cis_nao.txt-ref').T
self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0159_bse_h2b_uhf_cis.py
|
Python
|
apache-2.0
| 2,055
|
[
"PySCF"
] |
007469a146989b4ef99cd4874887a151c6e7acb0cc1195f37dee4cafdc4d1a1f
|
"""Simple script to retrieve the coordinates for the molecules and to
save to simpler numpy array files, for easy access later.
"""
import os.path as osp
import numpy as np
from rdkit import Chem
from mastic.interfaces.rdkit import AssignBondOrdersFromTemplate
from mastic.interfaces.rdkit import RDKitMoleculeWrapper
TPPU_MOL_path = osp.join(".", "TPPU.mol")
TPPU_MOL_rdkit = Chem.MolFromMolFile(TPPU_MOL_path, sanitize=True)
TPPU_PDB_path = osp.join(".", "TPPU.pdb")
TPPU_PDB_rdkit = Chem.MolFromPDBFile(TPPU_PDB_path, removeHs=False, sanitize=False)
seh_PDB_path = osp.join(".", "sEH.pdb")
seh_rdkit = Chem.MolFromPDBFile(seh_PDB_path, removeHs=False, sanitize=False)
TPPU_rdkit = AssignBondOrdersFromTemplate(TPPU_MOL_rdkit, TPPU_PDB_rdkit)
TPPU_rdkit_wrapper = RDKitMoleculeWrapper(TPPU_rdkit, mol_name="TPPU")
seh_rdkit_wrapper = RDKitMoleculeWrapper(seh_rdkit, mol_name="sEH")
TPPU_coords = TPPU_rdkit_wrapper.get_conformer_coords(0)
seh_coords = seh_rdkit_wrapper.get_conformer_coords(0)
# write the coordinates out to a binary file
np.save("TPPU_coords.npy", TPPU_coords)
np.save("sEH_coords.npy", seh_coords)
|
salotz/mast
|
examples/sEH-TPPU/seh_tppu_crystal_coords.py
|
Python
|
mit
| 1,126
|
[
"RDKit"
] |
0a5d44bc50ae1cce58b3452370b5c0690987eacb565b7fdc62808a8931052e16
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
import numpy
import unittest as ut
from tests_common import abspath
@ut.skipIf(not espressomd.has_features(["LENNARD_JONES"]),
"Features not available, skipping test!")
class LennardJonesTest(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
data = numpy.loadtxt(abspath('data/lj_system.dat'))
def setUp(self):
self.system.part.clear()
self.system.box_l = [10.7437] * 3
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
self.system.cell_system.skin = 0.4
self.system.time_step = .1
for i in range(self.data.shape[0]):
self.system.part.add(id=int(self.data[i][0]), pos=[
self.data[i][1], self.data[i][2], self.data[i][3]])
def check(self):
rms = 0.0
max_df = 0.0
for i in range(self.data.shape[0]):
f = self.system.part[i].f
for j in range(3):
df2 = (self.data[i][4 + j] - f[j])**2
rms += df2
max_df = max(max_df, (df2)**0.5)
rms = rms**0.5
self.assertLess(rms, 1e-5)
self.assertLess(max_df, 1e-5)
def test_dd(self):
self.system.cell_system.set_domain_decomposition(
use_verlet_lists=False)
self.system.integrator.run(recalc_forces=True, steps=0)
self.check()
def test_dd_vl(self):
self.system.cell_system.set_domain_decomposition(use_verlet_lists=True)
# Build VL and calc ia
self.system.integrator.run(recalc_forces=True, steps=0)
self.check()
# Calc is from VLs
self.system.integrator.run(recalc_forces=True, steps=0)
self.check()
if __name__ == '__main__':
print("Features: ", espressomd.features())
ut.main()
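# Illustrative invocation: ESPResSo python tests are normally run through the
# pypresso wrapper from the build directory, e.g.
#
#     ./pypresso testsuite/python/lj.py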
|
hmenke/espresso
|
testsuite/python/lj.py
|
Python
|
gpl-3.0
| 2,725
|
[
"ESPResSo"
] |
6405cb8fb56695794752e52c5a6e3208ffc9f902789e4114565b571d2c1eb1fc
|
#! /usr/bin/env python
# This script uses Fourier analysis to try to detect regions of the image
# with periodicity in a certain frequency range.
import cv, cv2, time
import numpy as np, matplotlib.pyplot as plot
import sys, os, cPickle as pickle
TEMPORAL_WINDOW = 120 # number of frames to consider in Fourier analysis
FREQ_COMPONENTS = [3,4,5,6,7]
TEMPORAL_WINDOW_FULL = False
# SPIKE_DETECTION = "factor"
# SPIKE_DETECTION = "offset"
# SPIKE_DETECTION = "combo"
# SPIKE_DETECTION = "max" # if the spike is maximal
SPIKE_DETECTION = "outlier" # if the spike is a statistical outlier
# standard deviations above the mean required to be considered a spike
PEAK_STDEVS = 9
# illustrative default thresholds (assumed values) for the "factor", "offset"
# and "combo" spike-detection modes used below
PEAK_MAGNITUDE_FACTOR = 3
PEAK_MAGNITUDE_OFFSET = 50
# frequency bin must be above this in order to be considered a significant peak
MIN_FREQ_INTENSITY = 50
# size of overlapping bounding boxes to check for periodicity
SPATIAL_WINDOW_X = 10
SPATIAL_WINDOW_Y = 10
# overlap factor of 2 means each window overlaps by half
OVERLAP_FACTOR = 2
# scale to resize optical flow windows to, for consistent SVM inputs
TARGET_SCALE_X = 50
TARGET_SCALE_Y = 50
# color of the rectangles to draw over the image
OVERLAY_COLOR = (255, 255, 255)
# the box to monitor for freq histogram or waveform
# MONITOR_BOX = 310 # 25 meter, foot area when walking
# MONITOR_BOX = 239 # 25 meter, periodic box
# MONITOR_BOX = 235 # 25 meter, to the left of person
MONITOR_BOX = 0
MONITOR_BOX_COLOR = (255, 0, 0)
MONITOR_BOX_COLOR_POSITIVE = (0,255,0)
# if we want to see the frequency histogram in the desired box
DISPLAY_HIST = False
FFT = [] # the FFT of the monitor box
FFT_THRESHOLD = 0 # this is set per frame
# if we want to see the FFT of the FFT, for tuning purposes.
DISPLAY_SECOND_DEGREE_FOURIER = False
SECOND_DEGREE_FFT = []
# if we want to see the intensity waveform in the desired box
DISPLAY_WAVEFORM = False
WAVEFORM = [] # the waveform of the monitor box
# display a window with the optical flow?
OPTICAL_FLOW = False
OPTICAL_FLOW_IMAGE = None
# display a window with the cropped rectangle's optical flow?
CROPPED_OPTICAL_FLOW = False
CROPPED_OPTICAL_FLOW_IMAGE = None
# draw a color representation of the flow instead of a vector field?
OPTICAL_FLOW_HSV = False
# display the image fullscreen?
FULLSCREEN = False
# if we just want to scan through the images
SCAN = False
# show the mask of the background subtraction
MASK = False
# set once we load an image
WIDTH = 0
HEIGHT = 0
START_FRAME = 0
DISPLAY_IMAGE = None
# vector of windows, each of which is a time-indexed vector of avg intensities
windows = []
# these are where we'll draw our bounding boxes
motion_detected_windows = []
# we'll try and create some bounding boxes that contain all the periodic motion
PERIODIC_RECTANGLES = []
PERIODIC_RECTANGLE_COLOR = (0,0,255)
# for visualization purposes
CURRENT_PERIODIC_RECTANGLES = []
# we'll use this to label the regions we're finding, for output
PERIODIC_RECTANGLE_INDEX = 0
# export 30 frames of each candidate region to the classifier
CLASSIFIER_REGION_PERIOD = 30
# directory to save cropped regions to
OUTDIR = None
# hide all windows, for batch processing
HEADLESS = False
# don't save images, just optical flows
SKIP_IMAGES = False
# motion detection parameters
MOTION_WINDOW_SIZE = 25
MOTION_THRESHOLD = 1
# how long to require motion to stick around
MIN_HYSTERESIS_FRAMES = 3
# show edge image
EDGE_IMAGE = False
# how quickly to decay non-persistent cells
DECAY_RATE = 2
# playback framerate
FRAMERATE = 0.03
# the last frame, for optical flow or whatever
last_frame = None
last_color_frame = None
#-----------------------------------------------------------------
def initialize():
global windows, WIDTH, HEIGHT
imagefile = "frame%04d.jpg" % 1
path = "%s%s" % (directory, imagefile)
image = cv.LoadImage(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
WIDTH, HEIGHT = cv.GetSize(image)
num_windows_x = WIDTH / (SPATIAL_WINDOW_X / OVERLAP_FACTOR)
num_windows_y = HEIGHT / (SPATIAL_WINDOW_Y / OVERLAP_FACTOR)
num_windows = num_windows_x * num_windows_y
for k in range(0, num_windows):
temporal_window = [0] * TEMPORAL_WINDOW
windows.append(temporal_window)
if not HEADLESS:
if DISPLAY_HIST:
plot.figure(1)
plot.show(block=False)
if DISPLAY_WAVEFORM:
plot.figure(2)
plot.show(block=False)
if DISPLAY_SECOND_DEGREE_FOURIER:
plot.figure(3)
plot.show(block=False)
cv.NamedWindow("display", cv.CV_WINDOW_NORMAL)
if FULLSCREEN:
cv2.setWindowProperty("display", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
cv.SetMouseCallback("display", handle_mouse)
if OPTICAL_FLOW:
cv.NamedWindow("optical_flow", cv.CV_WINDOW_NORMAL)
if CROPPED_OPTICAL_FLOW:
cv.NamedWindow("cropped_optical_flow", cv.CV_WINDOW_NORMAL)
#-----------------------------------------------------------------
def detect_periodic(directory):
global windows, WIDTH, HEIGHT, WAVEFORM, FFT_THRESHOLD, last_frame, last_color_frame, motion_detected_windows, TEMPORAL_WINDOW_FULL, PERIODIC_RECTANGLES, DISPLAY_IMAGE, CURRENT_PERIODIC_RECTANGLES, OPTICAL_FLOW_IMAGE
initialize()
feature_params = dict( maxCorners = 100,
qualityLevel = 0.003,
minDistance = 7,
blockSize = 7 )
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03) )
color = np.random.randint(0,255,(100,3))
fgbg = cv2.BackgroundSubtractorMOG()
hysteresis = None
i = START_FRAME
# iterate until we can no longer read an image
while True:
frame_st = time.time()
imagefile = "frame%04d.jpg" % (i+1)
print imagefile
path = "%s%s" % (directory, imagefile)
image = cv.LoadImage(path)
gray_image = cv.LoadImage(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
color_image = cv.LoadImage(path)
WIDTH, HEIGHT = cv.GetSize(image)
if i == START_FRAME:
hysteresis = np.zeros((WIDTH/MOTION_WINDOW_SIZE, HEIGHT/MOTION_WINDOW_SIZE))
hysteresis_sum = hysteresis
last_frame = image
# identify regions with sufficient optical flow
# calculate optical flow
cv2_last = np.asarray(cv.GetMat(last_frame))
cv2_next = np.asarray(cv.GetMat(image))
cv2_gray = np.asarray(cv.GetMat(gray_image))
cv2_color = np.asarray(cv.GetMat(color_image))
#start = time.time()
#flow = cv2.calcOpticalFlowFarneback(cv2_last, cv2_next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
#if (i-START_FRAME+1) % 60 == 1:
# p0 = cv2.goodFeaturesToTrack(cv2_last, mask = None, **feature_params)
# mask = np.zeros_like(np.asarray(cv.GetMat(image)))
#mid = time.time()
#p1, st, err = cv2.calcOpticalFlowPyrLK(cv2_last, cv2_next, p0, None, **lk_params)
#end = time.time()
#good_new = p1[st==1]
#good_old = p0[st==1]
# draw the tracks
#for idx,(new,old) in enumerate(zip(good_new,good_old)):
# a,b = new.ravel()
# c,d = old.ravel()
# cv2.line(mask, (a,b),(c,d), color[idx].tolist(), 2)
# cv2.circle(cv2_next,(a,b),5,color[idx].tolist(),-1)
#OPTICAL_FLOW_IMAGE = cv2.add(cv2_next,mask)
#if (i-START_FRAME+1) % 15 == 1:
# flow = cv2.calcOpticalFlowFarneback(cv2_last, cv2_next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
# st = time.time()
# significant_motion = find_significant_motion(flow)
# end = time.time()
# print "Finding significant motion took %s secs" % (end-st)
st = time.time()
fgmask = fgbg.apply(cv2_color)
end = time.time()
print "Background subtraction took %s secs" % (end-st)
st = time.time()
significant_motion = find_significant_motion_from_mask(fgmask)
end = time.time()
print "Finding areas of significant motion took %s secs" % (end-st)
colormask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
if MASK:
OPTICAL_FLOW_IMAGE = colormask
elif EDGE_IMAGE:
coloredge = cv2.cvtColor(cv2.Canny(cv2_gray, 100, 200), cv2.COLOR_GRAY2BGR)
OPTICAL_FLOW_IMAGE = coloredge
else:
OPTICAL_FLOW_IMAGE = cv2_color
#OPTICAL_FLOW_IMAGE = draw_flow(cv2_next, flow, step=WIDTH / 25)
hysteresis = np.zeros((WIDTH/MOTION_WINDOW_SIZE, HEIGHT/MOTION_WINDOW_SIZE))
for rect in significant_motion:
coord = (rect[0][0] / MOTION_WINDOW_SIZE, rect[0][1] / MOTION_WINDOW_SIZE)
hysteresis[coord] = 1
for ri,row in enumerate(hysteresis_sum):
for ci,cell in enumerate(row):
if cell >= MIN_HYSTERESIS_FRAMES - 1:
coord = (ri,ci)
p0 = (coord[0] * MOTION_WINDOW_SIZE, coord[1] * MOTION_WINDOW_SIZE)
p1 = (p0[0] + MOTION_WINDOW_SIZE-1, p0[1] + MOTION_WINDOW_SIZE-1)
rect = (p0,p1)
cv2.rectangle(OPTICAL_FLOW_IMAGE, rect[0], rect[1], (0,0,255))
# increment persistent frame counts
hysteresis_sum += hysteresis
# decrement the non-persistent counts
decrement = hysteresis
decrement -= np.ones_like(decrement)
decrement *= (np.zeros_like(decrement) - np.ones_like(decrement))
# now decrement[x,y] == 1 iff hysteresis[x,y] == 0
for n in range(0,DECAY_RATE):
hysteresis_sum -= decrement
hysteresis_sum = np.clip(hysteresis_sum, 0, 3*MIN_HYSTERESIS_FRAMES)
# reset count for windows that weren't present this frame
#hysteresis_sum = np.multiply(hysteresis_sum, hysteresis)
display_visuals(image)
#p0 = p1
last_frame = image
i += 1
frame_end = time.time()
if (frame_end-frame_st) < FRAMERATE:
time.sleep(FRAMERATE - (frame_end-frame_st))
else:
print "Overloaded."
#-----------------------------------------------------------------
def find_significant_motion_from_mask(mask):
sig = []
size = MOTION_WINDOW_SIZE
threshold = MOTION_THRESHOLD
x = 0
y = 0
while x+size < WIDTH:
y = 0
while y+size < HEIGHT:
subimage = mask[y:y+size, x:x+size]
if np.sum(subimage)/255 >= threshold:
sig.append(((x,y),(x+size-1,y+size-1)))
y += size
x += size
return sig
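# Worked example: with MOTION_WINDOW_SIZE = 25 and MOTION_THRESHOLD = 1, a
# window is flagged as soon as one foreground pixel (value 255) falls inside
# it, since np.sum(subimage)/255 counts the foreground pixels.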
#-----------------------------------------------------------------
def find_significant_motion_from_flow(flow):
sig = []
size = 2
threshold = 0.5
x = 0
y = 0
while x+size < WIDTH:
y = 0
while y+size < HEIGHT:
#total = 0.0
found = False
for xi in range(0,size):
for yi in range(0,size):
#total += np.linalg.norm(flow[y+yi, x+xi])
if np.linalg.norm(flow[y+yi, x+xi]) > threshold:
found = True
if found: #total/(size**2) > threshold:
sig.append(((x,y),(x+size,y+size)))
y += size
x += size
return sig
#-----------------------------------------------------------------
def draw_flow(img, flow, step=8):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 0, (0, 255, 0), -1)
return vis
#-----------------------------------------------------------------
def draw_hsv(flow):
h, w = flow.shape[:2]
fx, fy = flow[:,:,0], flow[:,:,1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx*fx+fy*fy)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*(180/np.pi/2)
hsv[...,1] = 255
hsv[...,2] = np.minimum(v*4, 255)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
#-----------------------------------------------------------------
def handle_mouse(event, x, y, dummy1, dummy2):
global MONITOR_BOX
MONITOR_BOX = get_subwindow_index(x, y)
#-----------------------------------------------------------------
def display_visuals(image):
DISPLAY_IMAGE = image
if not HEADLESS:
DISPLAY_IMAGE = draw_overlays(DISPLAY_IMAGE, motion_detected_windows)
cv.ShowImage("display", DISPLAY_IMAGE)
# use 'is not None' since comparing a numpy array with != is elementwise
if OPTICAL_FLOW and OPTICAL_FLOW_IMAGE is not None:
cv2.imshow("optical_flow", OPTICAL_FLOW_IMAGE)
if CROPPED_OPTICAL_FLOW and CROPPED_OPTICAL_FLOW_IMAGE is not None:
cv2.imshow("cropped_optical_flow", CROPPED_OPTICAL_FLOW_IMAGE)
if DISPLAY_HIST:
plot.figure(1)
plot.clf()
plot.title("Frequency Spectrum")
plot.bar(range(0, len(FFT)), FFT)
plot.hlines(FFT_THRESHOLD, 0, len(FFT))
plot.draw()
if DISPLAY_WAVEFORM:
plot.figure(2)
plot.clf()
plot.title("Intensity Time-Series")
plot.axis([0, len(WAVEFORM), 0, 255])
plot.bar(range(0, len(WAVEFORM)), WAVEFORM)
plot.draw()
if DISPLAY_SECOND_DEGREE_FOURIER:
plot.figure(3)
plot.clf()
plot.title("Second Degree FFT")
plot.bar(range(0, len(SECOND_DEGREE_FFT)), SECOND_DEGREE_FFT)
plot.draw()
cv.WaitKey(1)
#-----------------------------------------------------------------
def wrap_rectangles_around_windows():
global PERIODIC_RECTANGLE_INDEX
if len(motion_detected_windows) == 0:
return []
# just create a single bounding rectangle for now
minx = sys.maxint
miny = sys.maxint
maxx = 0
maxy = 0
for window in motion_detected_windows:
if (window[0] < minx):
minx = window[0]
if (window[1] < miny):
miny = window[1]
if (window[0] + SPATIAL_WINDOW_X > maxx):
maxx = window[0] + SPATIAL_WINDOW_X
if (window[1] + SPATIAL_WINDOW_Y > maxy):
maxy = window[1] + SPATIAL_WINDOW_Y
clamped_minx = max(minx-1, 0)
clamped_miny = max(miny-1, 0)
clamped_maxx = min(maxx, WIDTH - 1)
clamped_maxy = min(maxy, HEIGHT - 1)
# make the window square, for scaling
if clamped_maxx - clamped_minx > clamped_maxy - clamped_miny:
diff = (clamped_maxx - clamped_minx) - (clamped_maxy - clamped_miny)
diffup = diff/2
diffdown = diff - diffup
clamped_miny -= diffup
clamped_maxy += diffdown
if (clamped_miny < 0):
clamped_maxy -= clamped_miny
clamped_miny = 0
elif (clamped_maxy > HEIGHT - 1):
clamped_miny -= (clamped_maxy - (HEIGHT - 1))
clamped_maxy = HEIGHT - 1
elif clamped_maxx - clamped_minx < clamped_maxy - clamped_miny:
diff = (clamped_maxy - clamped_miny) - (clamped_maxx - clamped_minx)
diffleft = diff/2
diffright = diff - diffleft
clamped_minx -= diffleft
clamped_maxx += diffright
if (clamped_minx < 0):
clamped_maxx -= clamped_minx
clamped_minx = 0
elif (clamped_maxx > WIDTH - 1):
clamped_minx -= (clamped_maxx - (WIDTH - 1))
clamped_maxx = WIDTH - 1
# clean up remaining small differences...
while clamped_maxx - clamped_minx > clamped_maxy - clamped_miny:
if clamped_maxy < HEIGHT - 1:
clamped_maxy += 1
else:
clamped_miny -= 1
while clamped_maxx - clamped_minx < clamped_maxy - clamped_miny:
if clamped_maxx < WIDTH - 1:
clamped_maxx += 1
else:
clamped_minx -= 1
# since we've added a new rectangle, increment the label
PERIODIC_RECTANGLE_INDEX += 1
return [((clamped_minx, clamped_miny), (clamped_maxx, clamped_maxy), 0, PERIODIC_RECTANGLE_INDEX)]
#-----------------------------------------------------------------
def identify_periodic_windows(windows):
global FFT, FFT_THRESHOLD, SECOND_DEGREE_FFT
window_locations = []
# don't register windows until we've got a whole temporal window
if not TEMPORAL_WINDOW_FULL:
return window_locations
# if we just want to run through the video real quick
if SCAN:
return window_locations
for i in range(0, len(windows)):
fft = abs(np.fft.fft(windows[i]))
avg = 0
for j in range(1, len(fft) / 2):
avg += fft[j]
avg /= len(fft) / 2 - 1
fft_threshold = 0
if SPIKE_DETECTION == "factor":
fft_threshold = avg * PEAK_MAGNITUDE_FACTOR
elif SPIKE_DETECTION == "offset":
fft_threshold = avg + PEAK_MAGNITUDE_OFFSET
elif SPIKE_DETECTION == "combo":
fft_threshold = avg * PEAK_MAGNITUDE_FACTOR + PEAK_MAGNITUDE_OFFSET
elif SPIKE_DETECTION == "max":
fft_max = max(fft[2:len(fft)/2])
fft_threshold = fft_max
elif SPIKE_DETECTION == "outlier":
stdev = 0
for j in range(1, len(fft) / 2):
stdev += abs(avg - fft[j])
stdev /= len(fft) / 2 - 1
fft_threshold = avg + PEAK_STDEVS * stdev
# must be greater than some baseline to rule out noise
fft_threshold = max(fft_threshold, MIN_FREQ_INTENSITY)
if i == MONITOR_BOX:
FFT_THRESHOLD = fft_threshold
if (DISPLAY_HIST and i == MONITOR_BOX):
FFT = fft[0:len(fft)/2]
FFT[0] = 0
if (DISPLAY_SECOND_DEGREE_FOURIER and i == MONITOR_BOX):
SECOND_DEGREE_FFT = abs(np.fft.fft(fft[1:len(fft)/2]))
periodic = False
for f in FREQ_COMPONENTS:
if (fft[f] >= fft_threshold and fft[f] > fft[1]):
periodic = True
if periodic:
window_locations.append(get_subwindow_location(i))
return window_locations
#-----------------------------------------------------------------
def get_subwindow_index(x, y):
x = max(0, x-SPATIAL_WINDOW_X/2)
y = max(0, y-SPATIAL_WINDOW_Y/2)
x_index = x / (SPATIAL_WINDOW_X / OVERLAP_FACTOR)
y_index = y / (SPATIAL_WINDOW_Y / OVERLAP_FACTOR)
windows_per_row = WIDTH / (SPATIAL_WINDOW_X / OVERLAP_FACTOR)
return x_index + y_index * windows_per_row
#-----------------------------------------------------------------
def get_subwindow_location(i):
global SPATIAL_WINDOW_X, SPATIAL_WINDOW_Y, OVERLAP_FACTOR
x = (i % ( WIDTH / (SPATIAL_WINDOW_X / OVERLAP_FACTOR) ) ) * SPATIAL_WINDOW_X / OVERLAP_FACTOR
y = ( (i*SPATIAL_WINDOW_X / OVERLAP_FACTOR) / WIDTH ) * SPATIAL_WINDOW_Y / OVERLAP_FACTOR
return (x, y)
#-----------------------------------------------------------------
def avg_subwindow(image, subwindow_index):
# this function should compute the average pixel intensity
# in a given subwindow.
# for now, we'll try just a simple average,
# but Junaed's paper seems to talk about Gaussian
# weighted averages.
# if we just want to run through the video real quick
if (SCAN):
return 0
(firstX, firstY) = get_subwindow_location(subwindow_index)
total = 0
count = 1
for x in range(firstX, firstX + SPATIAL_WINDOW_X - 1):
for y in range(firstY, firstY + SPATIAL_WINDOW_Y - 1):
# make sure our windows don't go outside the image
if (x < WIDTH and y < HEIGHT):
pixval = cv.Get2D(image, y, x)
total += pixval[0]
count += 1
return total / count
#-----------------------------------------------------------------
def draw_overlays(image, motion_detected_windows):
# this function draws a rectangle in all areas of the image
# specified by motion_detected_windows
monitor_positive = False
monitor_loc = get_subwindow_location(MONITOR_BOX)
for window in motion_detected_windows:
pt1 = window
pt2 = (window[0] + SPATIAL_WINDOW_X - 1, window[1] + SPATIAL_WINDOW_Y - 1)
cv.Rectangle(image, pt1, pt2, OVERLAY_COLOR)
if (window[0] == monitor_loc[0] and window[1] == monitor_loc[1]):
monitor_positive = True
# draw the bounding rectangles for the periodic motion
for rect in CURRENT_PERIODIC_RECTANGLES:
cv.Rectangle(image, rect[0], rect[1], PERIODIC_RECTANGLE_COLOR)
# draw monitor box if we want it
if DISPLAY_HIST or DISPLAY_WAVEFORM or DISPLAY_SECOND_DEGREE_FOURIER:
        pt1 = get_subwindow_location(MONITOR_BOX)
pt2 = (pt1[0] + SPATIAL_WINDOW_X - 1, pt1[1] + SPATIAL_WINDOW_Y - 1)
if monitor_positive:
cv.Rectangle(image, pt1, pt2, MONITOR_BOX_COLOR_POSITIVE)
else:
cv.Rectangle(image, pt1, pt2, MONITOR_BOX_COLOR)
return image
#-----------------------------------------------------------------
def main():
try:
detect_periodic(directory)
except IOError:
print "Either no image was found, or iteration over images complete."
print "Restarting"
main()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <image-directory>" % sys.argv[0]
sys.exit(1)
idx = 2
for arg in sys.argv[2:]:
if arg == "--hist":
DISPLAY_HIST = True
elif arg == "--doublefourier":
DISPLAY_SECOND_DEGREE_FOURIER = True
elif arg == "--waveform":
DISPLAY_WAVEFORM = True
elif arg == "--fs" or arg == "--fullscreen":
FULLSCREEN = True
elif arg == "--outdir":
OUTDIR = os.path.normpath(sys.argv[idx + 1]) + os.sep
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
elif arg == "--skip-images":
SKIP_IMAGES = True
elif arg == "--optical-flow":
OPTICAL_FLOW = True
elif arg == "--cropped-optical-flow":
CROPPED_OPTICAL_FLOW = True
elif arg == "--hsv":
OPTICAL_FLOW_HSV = True
elif arg == "--scan":
SCAN = True
DISPLAY_HIST = False
DISPLAY_WAVEFORM = False
OUTDIR = None
elif arg == "--headless":
HEADLESS = True
elif arg == "--start":
START_FRAME = int(sys.argv[idx + 1])
elif arg == "--mask":
MASK = True
elif arg == "--motion-window":
MOTION_WINDOW_SIZE = int(sys.argv[idx+1])
elif arg == "--motion-threshold":
MOTION_THRESHOLD = int(sys.argv[idx+1])
elif arg == "--framerate":
FRAMERATE = 1.0 / float(int(sys.argv[idx+1]))
elif arg == "--hysteresis":
MIN_HYSTERESIS_FRAMES = int(sys.argv[idx+1])
elif arg == "--decay-rate":
DECAY_RATE = int(sys.argv[idx+1])
elif arg == "--edge":
EDGE_IMAGE = True
idx += 1
directory = os.path.normpath(sys.argv[1]) + os.sep
main()
|
AutonomyLab/husky
|
periodic_gestures/reference-code/efficient-detect-periodic.py
|
Python
|
gpl-3.0
| 23,441
|
[
"Gaussian"
] |
510008bb646bade1516bade4299627a808063a38651688c1bc9598dda896a0c6
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
from skbio import RNA
class TestRNA(unittest.TestCase):
def test_nondegenerate_chars(self):
exp = set("ACGU")
self.assertEqual(RNA('').nondegenerate_chars, exp)
self.assertEqual(RNA.nondegenerate_chars, exp)
def test_degenerate_map(self):
exp = {
'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U'])
}
self.assertEqual(RNA('').degenerate_map, exp)
self.assertEqual(RNA.degenerate_map, exp)
def test_complement_map(self):
exp = {
'-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
}
self.assertEqual(RNA('').complement_map, exp)
self.assertEqual(RNA.complement_map, exp)
def test_motif_purine_run(self):
seq = RNA("")
self.assertEqual(list(seq.find_motifs("purine-run")), [])
seq = RNA("AARC--UCRG")
self.assertEqual(list(seq.find_motifs("purine-run")),
[slice(0, 3), slice(8, 10)])
seq = RNA("AA-RC--UCR-G")
self.assertEqual(list(seq.find_motifs("purine-run", min_length=3,
ignore=seq.gaps())),
[slice(0, 4)])
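        # With gaps ignored, "AA-R" is a purine run of length 3 (the gap
        # at index 2 is skipped), hence the single slice(0, 4); the
        # trailing "R-G" run has only 2 purines and is filtered out.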
def test_motif_pyrimidine_run(self):
seq = RNA("")
self.assertEqual(list(seq.find_motifs("pyrimidine-run")), [])
seq = RNA("AARC--UCRG")
self.assertEqual(list(seq.find_motifs("pyrimidine-run")),
[slice(3, 4), slice(6, 8)])
seq = RNA("AA-RC--UCR-G")
self.assertEqual(list(seq.find_motifs("pyrimidine-run", min_length=3,
ignore=seq.gaps())),
[slice(4, 9)])
if __name__ == "__main__":
unittest.main()
|
jensreeder/scikit-bio
|
skbio/sequence/tests/test_rna.py
|
Python
|
bsd-3-clause
| 2,588
|
[
"scikit-bio"
] |
c12f463f9a96bed861109ed46e9d30e03a640ea80297f3b322f8ceb6cb45d543
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
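# Examples (illustrative): mk_boolean("Yes") -> True, mk_boolean(1) -> True,
# mk_boolean(0) -> False, mk_boolean(None) -> False; any string outside the
# whitelist above is treated as False.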
def shell_expand(path, expand_relative_paths=False):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
if expand_relative_paths and not path.startswith('/'):
# paths are always 'relative' to the config?
if 'CONFIG_FILE' in globals():
CFGDIR = os.path.dirname(CONFIG_FILE)
path = os.path.join(CFGDIR, path)
path = os.path.abspath(path)
return path
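# Example (illustrative): with expand_relative_paths=True and the config
# file at /etc/ansible/ansible.cfg, shell_expand('roles') resolves to
# /etc/ansible/roles, while shell_expand('~/keys') simply expands the
# user's home directory and is already absolute.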
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False, expand_relative_paths=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isnone:
if value == "None":
value = None
elif ispath:
value = shell_expand(value)
elif istmppath:
value = shell_expand(value)
if not os.path.exists(value):
os.makedirs(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif ispathlist:
if isinstance(value, string_types):
value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
for x in value.split(os.pathsep)]
elif isinstance(value, string_types):
value = unquote(value)
return value
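# Example (illustrative): with an ansible.cfg containing
#   [defaults]
#   forks = 10
# get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5, integer=True)
# returns 10; if ANSIBLE_FORKS=20 is set in the environment it returns 20,
# because _get_config checks the environment before the config file.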
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True, expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '$HOME/.ansible/tmp', istmppath=True)
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale','ANSIBLE_MODULE_SET_LOCALE',False, boolean=True)
DEFAULT_MODULE_COMPRESSION= get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, integer=True)
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True)
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, boolean=True)
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, boolean=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied', 'dzdo': ''} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Authorization required', 'dzdo': ''} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas','dzdo']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, package, pacman, pkgng, yum, zypper", islist=True)
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispathlist=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispathlist=True)
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispathlist=True)
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispathlist=True)
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispathlist=True)
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispathlist=True)
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispathlist=True)
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispathlist=True)
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispathlist=True)
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', ispathlist=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, islist=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, boolean=True)
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, integer=True)
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, integer=True)
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"]
|
Censio/ansible-dev
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 23,692
|
[
"Galaxy",
"MOOSE"
] |
b83bfa30b5011bd9f3dded61b19ba56e3862e12d4edd723fe29742402465d3f0
|
#!/usr/bin/env py.test
from __future__ import print_function
import os
import sys
import sysconfig
import numpy as np
from numpy.fft import fft2, ifft2
from numpy.testing import assert_allclose
from scipy.optimize import check_grad, approx_fprime
import cubefit
from cubefit.fitting import (sky_and_sn,
chisq_galaxy_single,
chisq_galaxy_sky_multi,
chisq_position_sky_sn_multi)
# -----------------------------------------------------------------------------
# Helper functions
def assert_real(x):
if np.all((x.imag == 0.) & (x.real == 0.)):
return
absfrac = np.abs(x.imag / x.real)
mask = absfrac < 1.e-3 #1.e-4
if not np.all(mask):
raise RuntimeError("array not real: max imag/real = {:g}"
.format(np.max(absfrac)))
def fftconvolve(x, kernel):
"""convolve 2-d array x with a kernel *centered* in array."""
ny, nx = kernel.shape
xctr, yctr = (nx-1)/2., (ny-1)/2.
# Phasor that will shift kernel to be centered at (0., 0.)
fshift = cubefit.fft_shift_phasor_2d(kernel.shape, (-xctr, -yctr))
return ifft2(fft2(kernel) * fft2(x) * fshift).real
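# Sanity check (illustrative): a same-shape delta kernel placed at the
# center acts as the identity, since the phasor exactly undoes the
# centering offset:
#   x = np.random.rand(5, 5)
#   kernel = np.zeros((5, 5)); kernel[2, 2] = 1.0
#   assert_allclose(fftconvolve(x, kernel), x, atol=1e-12)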
def plot_gradient(im, fname, **kwargs):
"""Helper function for debugging only."""
import matplotlib.pyplot as plt
plt.imshow(im, cmap="bone", interpolation="nearest", origin="lower",
**kwargs)
plt.colorbar()
plt.savefig(fname)
plt.clf()
# -----------------------------------------------------------------------------
def test_sky_and_sn():
truesky = 3. * np.ones((10,))
truesn = 2. * np.ones((10,))
g = np.ones((10, 5, 5)) # fake galaxy, after convolution with PSF
s = np.zeros((10, 5, 5))
s[:, 3, 3] = 1. # psf is a single pixel
data = g + truesky[:, None, None] + truesn[:, None, None] * s
weight = np.ones_like(data)
sky, sn = cubefit.fitting.sky_and_sn(data, weight, g, s)
assert_allclose(sky, truesky)
assert_allclose(sn, truesn)
class TestFitting:
def setup_class(self):
"""Create some dummy data and a PSF."""
# some settings
MODEL_SHAPE = (32, 32)
nt = 3
nw = 3
ny = 15
nx = 15
yoffs = np.array([7,8,9]) # offset between model and data
xoffs = np.array([8,9,7])
# True data yctr, xctr given offset
self.trueyctrs = yoffs + (ny-1)/2. - (MODEL_SHAPE[0]-1)/2.
self.truexctrs = xoffs + (nx-1)/2. - (MODEL_SHAPE[1]-1)/2.
# Create a "true" underlying galaxy. This can be anything, but it
# should not be all zeros or flat.
ellip = 4.5 * np.ones(nw)
alpha = 6.0 * np.ones(nw)
sigma = 6.0 * np.ones(nw)
beta = 2. * np.ones(nw)
eta = 1.04 * np.ones(nw)
truegal = cubefit.psffuncs.gaussian_moffat_psf(
sigma, alpha, beta, ellip, eta,
np.zeros(nw) - 2., np.zeros(nw) - 2., MODEL_SHAPE)
# Create a PSF.
ellip = 1.5 * np.ones(nw)
alpha = 2.0 * np.ones(nw)
sigma = 2.0 * np.ones(nw)
beta = 2. * np.ones(nw)
eta = 1.04 * np.ones(nw)
yctr = np.zeros(nw)
xctr = np.zeros(nw)
A = cubefit.psffuncs.gaussian_moffat_psf(sigma, alpha, beta, ellip,
eta, yctr, xctr, MODEL_SHAPE)
self.psf = cubefit.TabularPSF(A)
# create the data by convolving the true galaxy model with the psf
# and taking a slice.
cubes = []
for j in range(nt):
data = np.empty((nw, ny, nx), dtype=np.float32)
for i in range(nw):
data_2d = fftconvolve(truegal[i], A[i])
data[i, :, :] = data_2d[yoffs[j]:yoffs[j]+ny,
xoffs[j]:xoffs[j]+nx]
cubes.append(cubefit.DataCube(data, np.ones_like(data),
np.ones(nw)))
self.cubes = cubes
# initialize galaxy model
self.galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]))
self.truegal = truegal
def test_chisq_galaxy_single_gradient(self):
"""Test that gradient function (used in galaxy fitting) returns value
close to what you get with a finite differences method.
"""
EPS = 1.e-7
data = self.cubes[0].data
weight = self.cubes[0].weight
psf = self.psf
ctr = (0., 0.)
# analytic gradient is `grad`
val, grad = chisq_galaxy_single(self.galaxy, data, weight, ctr, psf)
# save data - model residuals for finite differences chi^2 gradient.
# need to carry out subtraction in float64 to avoid round-off errors.
scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3], ctr)
r0 = data.astype(np.float64) - scene
# finite differences gradient: alter each element by EPS one
# at a time and recalculate chisq.
fdgrad = np.zeros_like(self.galaxy)
nk, nj, ni = self.galaxy.shape
for k in range(nk):
for j in range(nj):
for i in range(ni):
self.galaxy[k, j, i] += EPS
scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3],
ctr)
self.galaxy[k, j, i] -= EPS # reset model value.
# NOTE: rather than calculating
# chisq1 - chisq0 = sum(w * r1^2) - sum(w * r0^2)
# we calculate
# sum(w * (r1^2 - r0^2))
# which is the same quantity but avoids summing large
# numbers.
r1 = data.astype(np.float64) - scene
chisq_diff = np.sum(weight * (r1**2 - r0**2))
fdgrad[k, j, i] = chisq_diff / EPS
assert_allclose(grad, fdgrad, rtol=0.001, atol=0.)
def test_chisq_galaxy_sky_multi_gradient(self):
"""Test that gradient function (used in galaxy fitting) returns value
close to what you get with a finite differences method.
"""
EPS = 1.e-8
datas = [self.cubes[0].data]
weights = [self.cubes[0].weight]
psfs = [self.psf]
ctrs = [(0., 0.)]
# analytic gradient is `grad`
_, grad = chisq_galaxy_sky_multi(self.galaxy, datas, weights, ctrs,
psfs)
# NOTE: Following is specific to only having one cube!
data = datas[0]
weight = weights[0]
psf = psfs[0]
ctr = ctrs[0]
# save data - model residuals for finite differences chi^2 gradient.
# need to carry out subtraction in float64 to avoid round-off errors.
scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3], ctr)
r0 = data.astype(np.float64) - scene
sky = np.average(r0, weights=weight, axis=(1, 2))
r0 -= sky[:, None, None]
# finite differences gradient: alter each element by EPS one
# at a time and recalculate chisq.
fdgrad = np.zeros_like(self.galaxy)
nk, nj, ni = self.galaxy.shape
for k in range(nk):
for j in range(nj):
for i in range(ni):
self.galaxy[k, j, i] += EPS
scene = psf.evaluate_galaxy(self.galaxy, data.shape[1:3],
ctr)
self.galaxy[k, j, i] -= EPS # reset model value.
# NOTE: rather than calculating
# chisq1 - chisq0 = sum(w * r1^2) - sum(w * r0^2)
# we calculate
# sum(w * (r1^2 - r0^2))
# which is the same quantity but avoids summing large
# numbers.
r1 = data.astype(np.float64) - scene
sky = np.average(r1, weights=weight, axis=(1, 2))
r1 -= sky[:, None, None]
chisq_diff = np.sum(weight * (r1**2 - r0**2))
fdgrad[k, j, i] = chisq_diff / EPS
assert_allclose(grad, fdgrad, rtol=0.005, atol=0.)
def pixel_regpenalty_diff(self, regpenalty, galmodel, k, j, i, eps):
"""What is the difference in the regpenalty caused by changing
galmodel[k, j, i] by EPS?"""
def galnorm(k, j, i, eps=0.0):
return ((galmodel[k, j, i] + eps - regpenalty.galprior[k, j, i]) /
regpenalty.mean_gal_spec[k])
dchisq = 0.
if k > 0:
d0 = galnorm(k, j, i) - galnorm(k-1, j, i)
d1 = galnorm(k, j, i, eps) - galnorm(k-1, j, i)
dchisq += regpenalty.mu_wave * (d1**2 - d0**2)
if k < galmodel.shape[0] - 1:
d0 = galnorm(k+1, j, i) - galnorm(k, j, i)
            d1 = galnorm(k+1, j, i) - galnorm(k, j, i, eps)
dchisq += regpenalty.mu_wave * (d1**2 - d0**2)
if j > 0:
d0 = galnorm(k, j, i) - galnorm(k, j-1, i)
d1 = galnorm(k, j, i, eps) - galnorm(k, j-1, i)
dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
if j < galmodel.shape[1] - 1:
d0 = galnorm(k, j+1, i) - galnorm(k, j, i)
d1 = galnorm(k, j+1, i) - galnorm(k, j, i, eps)
dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
if i > 0:
d0 = galnorm(k, j, i) - galnorm(k, j, i-1)
d1 = galnorm(k, j, i, eps) - galnorm(k, j, i-1)
dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
if i < galmodel.shape[2] - 1:
d0 = galnorm(k, j, i+1) - galnorm(k, j, i)
d1 = galnorm(k, j, i+1) - galnorm(k, j, i, eps)
dchisq += regpenalty.mu_xy * (d1**2 - d0**2)
return dchisq
def test_regularization_penalty_gradient(self):
"""Ensure that regularization penalty gradient matches what you
get with a finite-differences approach."""
EPS = 1.e-10
mu_wave = 0.07
mu_xy = 0.001
# set galaxy model to best-fit (so that it is not all zeros!)
self.galaxy[:, :, :] = self.truegal
mean_gal_spec = np.average(self.cubes[0].data, axis=(1, 2))
galprior = np.zeros_like(self.galaxy)
regpenalty = cubefit.RegularizationPenalty(galprior, mean_gal_spec,
mu_xy, mu_wave)
_, grad = regpenalty(self.galaxy)
fdgrad = np.zeros_like(self.galaxy)
nk, nj, ni = self.galaxy.shape
for k in range(nk):
for j in range(nj):
for i in range(ni):
fdgrad[k, j, i] = self.pixel_regpenalty_diff(
regpenalty, self.galaxy, k, j, i, EPS) / EPS
rtol = 0.001
atol = 1.e-5 * np.max(np.abs(fdgrad))
assert_allclose(grad, fdgrad, rtol=rtol, atol=atol)
def test_point_source(self):
"""Test that evaluate_point_source returns the expected point source.
"""
psf = self.psf.point_source((0., 0.), (15, 15), (0., 0.))
def test_fit_position_grad(self):
"""Test the gradient of the sn and sky position fitting function
"""
def func_part(ctrs, galaxy, datas, weights, psfs):
chisq, grad = chisq_position_sky_sn_multi(ctrs, galaxy,
datas, weights, psfs)
return chisq
def grad_part(ctrs, galaxy, datas, weights, psfs):
chisq, grad = chisq_position_sky_sn_multi(ctrs, galaxy,
datas, weights, psfs)
return grad
x0s = np.zeros(8)
datas = [cube.data for cube in self.cubes]
weights = [cube.weight for cube in self.cubes]
psfs = [self.psf for cube in self.cubes]
code_grad = grad_part(x0s, self.galaxy, datas, weights, psfs)
test_grad = approx_fprime(x0s, func_part, np.sqrt(np.finfo(float).eps),
self.galaxy, datas, weights, psfs)
assert_allclose(code_grad[:-2], test_grad[:-2], rtol=0.005)
|
snfactory/cubefit
|
cubefit/tests/test_fitting.py
|
Python
|
mit
| 12,205
|
[
"Galaxy"
] |
f249be47a5adcf2a20058ff219633a03c8415ee3d315a0ac0a0b74a87eccc93e
|
"""
Single Bubble Model
===================
Simulate the trajectory of a particle rising through the water column
This module defines the classes, methods, and functions necessary to simulate
the rise of a single particle (bubble, droplet or solid particle) through the
water column. The ambient water properties are provided through the
`ambient.Profile` class object, which contains a netCDF4-classic dataset of
CTD data and the needed interpolation methods. The `dbm` class objects
`dbm.FluidParticle` and `dbm.InsolubleParticle` report the properties and
behavior of the particle during the simulation. An interface to the `dbm`
objects is provided by the Particle class objects defined in
`dispersed_phases`.
Notes
-----
This model solves for the trajectory `vec(x)` by the simple transport
equation::
d vec(x) / dt = vec(u)
where `vec(u)` is the vector velocity of the particle, which may include the
rise velocity and an ambient current. The rise velocity depends on the
particle size, which changes with pressure (if compressible) and as a result
of mass transfer (when soluble). Hence, this equation is usually coupled to a
system of equations for the change in mass of each chemical component in the
particle `m_i`, given by::
d (m_i) / dt = - beta * A * (Cs - C)
where `Cs` is the local solubility of component `i` and `C` is the local
concentration of component `i` in the surrounding water; `beta` is the mass
transfer coefficient and `A` is the surface area. Methods to compute
`beta`, `Cs`, and `A` are provided in the `dbm` module. Since source fluids
may have different temperature than the ambient, heat transfer is also
modeled::
d H / dt = - rho_p * cp * A * beta_T * (T - Ta)
where `H` is the heat content, given by `m_p * cp * T`; `beta_T` is the heat
transfer coefficient and `m_p` is the total mass of the particle. Since some
mass is lost due to dissolution, the particle temperature must be adjusted
by::
d H / dt = cp * d (m_p) / dt * T # Note d (m_p) / dt < 0
and for the heat of solution, using::
d H / dt = sum (d (m_i) /dt * dH_solR_i * Ru / M_i)
where `dH_solR` is the enthalpy of solution divided by the universal gas
constant (`Ru`) and `M_i` is the molecular weight of constituent `i`.
When the particle becomes very small, the heat transfer and dissolution
become unstable, leading to rapid oscillations in the predicted particle
temperature. To avoid this problem, this module accounts for heat transfer
until the particle temperature reaches equilibrium with the seawater (which
happens very quickly). Thereafter, the particle is assumed to be equal to
the temperature of the ambient water.
The equations for heat and mass transfer and for slip velocity are
discontinuous at the boundaries between particle shapes (e.g., ellipsoid and
spherical cap, etc.), and this can sometimes lead to the solution getting
stuck at the shape transition. The convergence criteria for the ODE solver are
set as a compromise between accuracy and robustness across a diverse range of
particles. Nonetheless, there are situations where these
discontinuities may still break the solution.
Finally, if the diameter of a fluid particle is observed to rapidly increase,
this is usually associated with a phase change from liquid to gas. The
diagnostic plots help to identify these effects by plotting the state space
together with several descriptive variables, including diameter, density,
and shape. However, there is no mechanism in this module to allow a droplet
to break up into multiple bubbles.
"""
# S. Socolofsky, November 2014, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import model_share
from tamoc import seawater
from tamoc import ambient
from tamoc import dbm
from tamoc import dispersed_phases
from netCDF4 import Dataset
from datetime import datetime
import numpy as np
# mpl imports moved to plotting functions
# import matplotlib.pyplot as plt
# import matplotlib as mpl
from copy import copy
from scipy import integrate
from warnings import warn
import os
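# Note: the governing equations in the module docstring map directly onto
# `derivs` below -- yp[0:3] is d vec(x)/dt (currents plus rise velocity),
# yp[3:-1] is d(m_i)/dt (dissolution plus biodegradation), and yp[-1] is
# the heat-content derivative dH/dt.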
class Model(object):
"""
Master class object for controlling and post-processing the simulation
This is the main program interface, and the only object or function in
this module that the user should call. At instantiation, the model
parameters and the ambient water column data are organized. For a given
simulation, the user passes a `dbm` particle object and its initial
conditions (e.g., mass, temperature, location) to the `simulate` method,
and the object computes the trajectory and plots the resulting path and
particle properties. The simulation results can be stored to and loaded
from a netCDF file using the `save_sim` and `load_sim` methods. An
ascii table of data for the state space for reading into other programs
(e.g., Matlab) can be output using the `save_txt` method. The object
can only store simulation results in its attribute variables for one
simulation at a time. Each time a new simulation is run or a past
simulation results file is loaded, the current simulation (if present) is
overwritten.
Parameters
----------
profile : `ambient.Profile` object, default = None
An object containing the ambient CTD data and associated methods.
The netCDF dataset stored in the `ambient.Profile` object may be open
or closed at instantiation. If open, the initializer will close the
file since this model does not support changing the ambient data once
initialized.
simfile: str, default = None
File name of a netCDF file containing the results of a previous
simulation run.
Attributes
----------
profile : `ambient.Profile` object
Ambient CTD data for the model simulation
p : `ModelParams` object
Set of model parameters not adjustable by the user
sim_stored : bool
Flag indicating whether or not simulation results exist in the object
namespace
particle : `dispersed_phases.SingleParticle` object
Interface to the `dbm` module and container for particle-specific
parameters
t : ndarray
Times (s) associated with the state space
y : ndarray
State space along the trajectory of the particle
z0 : float
The release depth (m)
x0 : float, default = 0.
The release x-coordinate (m)
y0 : float, default = 0.
The release y-coordinate (m)
de : float
Initial diameter of the particle (m)
yk : ndarray
Initial mole fractions of each chemical component (--)
T0 : float, optional
Initial temperature (K) of the particle at release
K : float, default = 1.
Mass transfer reduction factor (--)
K_T : float, default = 1.
Heat transfer reduction factor (--)
fdis : float, default = 1.e-6
Remainder fraction that turns off dissolution for each component (--)
delta_t : float, default = 0.1 s
Maximum time step to use (s) in the simulation output
See Also
--------
simulate, save_sim, load_sim
Notes
-----
The `Model` object will be initialized either with the `profile` data
making it ready to start a new simulation or with the results of a
previous simulation stored in `simfile`.
"""
def __init__(self, profile=None, simfile=None):
super(Model, self).__init__()
if profile is None:
# Create a Model object from a saved file
self.load_sim(simfile)
else:
# Create a new Model object
self.profile = profile
profile.close_nc()
# Enter the model parameters that the user cannot adjust
self.p = ModelParams(self.profile)
self.sim_stored = False
def simulate(self, particle, X0, de, yk, T0=None, K=1., K_T=1.,
fdis=1.e-6, t_hyd=0., lag_time=True, delta_t=0.1):
"""
Simulate the trajectory of a particle from given initial conditions
Simulate the trajectory of a particle (bubble, droplet or solid
particle) until the particle dissolves or until it reaches the free
surface.
Parameters
----------
particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
Object describing the properties and behavior of the particle.
X0 : float or ndarray
The release localtion (x0, y0, z0) depth (m) of the particle in
the simulation. If float, x0 = y0 = 0 is assumed.
de : float
Initial diameter of the particle (m)
yk : ndarray
Initial mole fractions of each component in the particle (--)
T0 : float, optional
Initial temperature (K) of the particle at release if not equal
to the temperature of the surrounding fluid. If omitted, the
model will set T0 to the ambient temperature.
K : float, default = 1.
Mass transfer reduction factor (--). Pre-multiplies the mass
transfer coefficients providing amplification (>1) or retardation
(<1) of the dissolution.
K_T : float, default = 1.
Heat transfer reduction factor (--). Pre-multiplies the heat
transfer coefficient providing amplification (>1) or retardation
(<1) of the heat flux.
fdis : float, default = 1.e-6
Fraction of the initial total mass remaining (--) for each
component in the particle when the particle should be considered
dissolved.
t_hyd : float, default = 0.
Hydrate film formation time (s). Mass transfer is computed by clean
bubble methods for t less than t_hyd and by dirty bubble methods
thereafter. The default behavior is to assume the particle is dirty
or hydrate covered from the release.
lag_time : bool, default = True
flag indicating whether the biodegradation rates should include
a lag time (True) or not (False). Default value is True.
delta_t : float, default = 0.1 s
Maximum time step to use (s) in the simulation. The ODE solver
in `calculate_path` is set up with adaptive step size integration,
so in theory this value determines the largest step size in the
output data, but not the numerical stability of the calculation.
See Also
--------
post_process, calculate_path, plot_state_space
Notes
-----
This method fills the object attributes `particle`, `t` and `y`
following successful simulation of the particle trajectory. It also
stores all the input variables as object attributes that do not
change during simulation.
"""
# Check the initial position and make it an array.
if not isinstance(X0, np.ndarray):
if not isinstance(X0, list):
X0 = np.array([0., 0., X0])
else:
X0 = np.array(X0)
# Make sure yk is an ndarray
if not isinstance(yk, np.ndarray):
if not isinstance(yk, list):
yk = np.array([yk])
else:
yk = np.array(yk)
# Check if the right number of elements are in yk
if len(yk) != len(particle.composition):
print('Wrong number of mole fractions:')
print(' yk : %d entries' % len(yk))
print(' composition : %d components\n' %
len(particle.composition))
return
# Save the input variables that are not part of the self.particle
# object
self.K_T0 = K_T
self.delta_t = delta_t
# Get the initial conditions for the simulation run
(self.particle, y0) = sbm_ic(self.profile, particle, X0, de, yk, T0,
K, K_T, fdis, t_hyd, lag_time)
# Open the simulation module
print('\n-- TEXAS A&M OIL-SPILL CALCULATOR (TAMOC) --')
print('-- Single Bubble Model --\n')
# Calculate the trajectory
print('Calculate the trajectory...')
self.t, self.y = calculate_path(self.profile, self.particle, self.p,
y0, delta_t)
print('Simulation complete.\n ')
self.sim_stored = True
# Restart heat transfer
self.particle.K_T = self.K_T0
def save_sim(self, fname, profile_path, profile_info):
"""
Save the current simulation results
Save the current simulation results and the model parameters so that
all information needed to rebuild the class object is stored in a
file. The output data are stored in netCDF4-classic format.
Parameters
----------
fname : str
File name of the file to write
profile_path : str
String stating the file path relative to the directory where
the output will be saved to the ambient profile data.
profile_info : str
Text describing the ambient profile data.
Notes
-----
It does not make sense to store the ambient data together with every
simulation output file. On the other hand, the simulation results
may be meaningless without the context of the ambient data. The
parameter `profile_path` provides a means to automatically load the
ambient data assuming the profile data are kept in the same place
relative to the output file. Since this cannot be guaranteed, the
`profile_info` variable provides additional descriptive information
so that the ambient data can be identified if they have been moved.
"""
if self.sim_stored is False:
print('No simulation results to store...')
print('Saved nothing to netCDF file.\n')
return
# Create the netCDF dataset object
title = 'Simulation results for the TAMOC Single Bubble Model'
nc = model_share.tamoc_nc_file(fname, title, profile_path, profile_info)
# Create variables for the dimensions
z = nc.createDimension('z', None)
p = nc.createDimension('profile', 1)
ns = nc.createDimension('ns', len(self.y[0,:]))
# Create variables for the model initial conditions
K_T0 = nc.createVariable('K_T0', 'f8', ('profile',))
K_T0.long_name = 'Initial heat transfer reduction factor'
K_T0.standard_name = 'K_T0'
K_T0.units = 'nondimensional'
delta_t = nc.createVariable('delta_t', 'f8', ('profile',))
delta_t.long_name = 'maximum simulation output time step'
delta_t.standard_name = 'delta_t'
delta_t.units = 'seconds'
# Create variables for the independent variable
t = nc.createVariable('t', 'f8', ('z',))
t.long_name = 'time coordinate'
t.standard_name = 'time'
t.units = 'seconds since release'
t.axis = 'T'
# Create variables for the state space
y = nc.createVariable('y', 'f8', ('z', 'ns',))
y.long_name = 'solution state space'
y.standard_name = 'y'
y.units = 'variable'
y.coordinate = 't'
# Store the initial conditions and model setup
K_T0[0] = self.K_T0
delta_t[0] = self.delta_t
# Store the dbm particle object
dispersed_phases.save_particle_to_nc_file(nc,
self.particle.composition, self.particle, self.K_T0)
# Save the model simulation result
t[:] = self.t
for i in range(len(nc.dimensions['ns'])):
y[0:len(self.t),i] = self.y[:,i]
# Close the netCDF dataset
nc.close()
def save_txt(self, base_name, profile_path, profile_info):
"""
Save the state space in ascii text format for exporting
Save the state space (dependent and independent variables) in an
ascii text file for exporting to other programs (e.g., Matlab).
Parameters
----------
base_name : str
The main name of the output file. This method writes two files:
the data are stored in base_name.txt, and the header information
describing each row of data are saved in base_name_header.txt.
profile_path : str
String stating the file path relative to the directory where
the output will be saved to the ambient profile data.
profile_info : str
Text describing the ambient profile data (less than 60
characters).
Notes
-----
The output data will be organized in columns, with each column
as follows:
            0 : Time (s)
            1 : x-coordinate (m)
            2 : y-coordinate (m)
            3 : Depth (m)
            4 : (n-1) : Masses of the particle components (kg)
            n : Heat (m_p * cp * T) (J)
A header will be written at the top of the file with the specific
details for that file.
The file is written using the `numpy.savetxt` method.
"""
if self.sim_stored is False:
print('No simulation results to store...')
print('Saved nothing to txt file.\n')
return
# Create the header string that contains the column descriptions
p_list = ['Single Bubble Model ASCII Output File \n']
p_list.append('Created: ' + datetime.today().isoformat(' ') + '\n\n')
p_list.append('Simulation based on CTD data in:\n')
p_list.append(profile_path)
p_list.append('\n\n')
p_list.append(profile_info)
p_list.append('\n\n')
p_list.append('Column Descriptions:\n')
p_list.append(' 0: Time in s\n')
p_list.append(' 1: x-coordinate in m\n')
p_list.append(' 2: y-coordinate in m\n')
p_list.append(' 3: Depth in m\n')
for i in range(len(self.particle.composition)):
p_list.append(' %d: Mass of %s in particle in kg\n' % \
(i+4, self.particle.composition[i]))
p_list.append(' %d: Heat content (m_p * cp * T) in J\n' % (i+5))
header = ''.join(p_list)
# Assemble and write the output data
data = np.hstack((np.atleast_2d(self.t).transpose(), self.y))
np.savetxt(base_name + '.txt', data)
with open(base_name + '_header.txt', 'w') as txt_file:
txt_file.write(header)
def load_sim(self, fname):
"""
Load in a saved simulation result file for post-processing
Load in a saved simulation result file and rebuild the `Model`
object attributes. The input files are in netCDF4-classic data
format.
Parameters
----------
fname : str
File name of the file to read
Notes
-----
This method will attempt to load the ambient profile data from the
`profile_path` attribute of the `fname` netCDF file. If the load
fails, a warning will be reported to the terminal, but the other
steps of loading the `Model` object attributes will be performed.
"""
# Open the netCDF dataset object containing the simulation results
nc = Dataset(fname)
# Try to open the profile data
self.profile = model_share.profile_from_model_savefile(nc, fname)
if self.profile is not None:
self.p = ModelParams(self.profile)
else:
self.p = None
# Load in the dispersed_phases.Particle object
self.particle = \
dispersed_phases.load_particle_from_nc_file(nc)[0][0]
# Extract the state space data
self.t = nc.variables['t'][:]
ns = len(nc.dimensions['ns'])
self.y = np.zeros((len(self.t), ns))
for i in range(ns):
self.y[:,i] = nc.variables['y'][0:len(self.t), i]
# Extract the initial conditions
self.K_T0 = nc.variables['K_T0'][0]
self.delta_t = nc.variables['delta_t'][0]
# Close the netCDF dataset
nc.close()
self.sim_stored = True
def post_process(self, fig=1):
"""
Plot the simulation state space and key interrogation parameters
Plot the standard set of post-processing figures, including the
state space and the key derived variables.
Parameters
----------
fig : int
Figure number to pass to the plotting methods
See Also
--------
plot_state_space
"""
if self.sim_stored is False:
print('No simulation results to plot...')
print('Plotting nothing.\n')
return
# Plot the results
print('Plotting the results...')
plot_state_space(self.profile, self.particle, self.p, self.t,
self.y, fig)
print('Done.\n')
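# Example usage (illustrative; the file name and the dbm constructor call
# are hypothetical):
#   profile = ambient.Profile('ctd_data.nc')
#   bubble = dbm.FluidParticle(['oxygen'])
#   sbm = Model(profile)
#   sbm.simulate(bubble, X0=1000., de=0.005, yk=np.array([1.]))
#   sbm.post_process()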
class ModelParams(object):
"""
Fixed model parameters for the single bubble model
This class stores the set of model parameters that should not be adjusted
by the user and that are needed by the single bubble model.
Parameters
----------
profile : `ambient.Profile` object
The ambient CTD object used by the single bubble model simulation.
Attributes
----------
rho_r : float
Reference density (kg/m^3) evaluated at mid-depth of the water body.
g : float
Acceleration of gravity (m/s^2)
Ru : float
Ideal gas constant (J/mol/K)
"""
def __init__(self, profile):
super(ModelParams, self).__init__()
# Store a reference density for the water column
z_ave = profile.z_max - (profile.z_max - profile.z_min) / 2.
T, S, P = profile.get_values(z_ave, ['temperature', 'salinity',
'pressure'])
self.rho_r = seawater.density(T, S, P)
# Store some physical constants
self.g = 9.81
self.Ru = 8.314510
# ----------------------------------------------------------------------------
# Functions to compute the trajectory
# ----------------------------------------------------------------------------
def calculate_path(profile, particle, p, y0, delta_t):
"""
Calculate the trajectory of a particle
Calculate the trajectory of a particle by integrating its path using
the `scipy.integrate.ode` object and associated methods.
Parameters
----------
profile : `ambient.Profile` object
Ambient CTD data for the model simulation
particle : `LagrangianParticle` object
Object describing the properties and behavior of the particle.
p : `ModelParams` object
Collection of model parameters passed to `derivs`.
y0 : ndarray
        Initial values of the state space (positions in m, masses in kg, and
        heat content in J of the particle) at the release point
delta_t : float
Maximum step size (s) to take in the integration
Notes
-----
The differential equation in `derivs` is written with respect to time, so
the independent variable in this simulation is time. The vertical
    coordinate, therefore, becomes a dependent variable, along with the masses
of each component in the particle and the particle temperature. Thus,
the state space is::
        y = np.hstack((x0, y0, z0, m0, H0))
where `H0` is the initial heat content, `m_p * cp * T0`. The variables
in the state space can be returned by::
        >>> z = y[2]
        >>> m = y[3:-1]
        >>> T = y[-1] / (np.sum(m) * particle.cp)
"""
# Create the integrator object: use "vode" with "backward
# differentiation formula" for stiff ODEs
r = integrate.ode(derivs).set_integrator('vode', method='bdf', atol=1.e-6,
rtol=1e-3, order=5, max_step=delta_t)
# Initialize the state space
t0 = 0.
r.set_initial_value(y0, t0)
# Set passing variables for derivs method
r.set_f_params(profile, particle, p)
# Create vectors (using the list data type) to store the solution
t = [t0]
y = [y0]
# Integrate to the free surface (profile.z_min)
k = 0
psteps = 10.
stop = False
while r.successful() and not stop:
# Print progress to the screen
m0 = np.sum(y[0][3:-1])
mt = np.sum(y[-1][3:-1])
f = mt / m0
if np.remainder(np.float(k), psteps) == 0.:
print(' Depth: %g (m), t: %g (s), k: %d, f: %g (--)' %
(r.y[2], t[-1], k, f))
# Perform one step of the integration
r.integrate(t[-1] + delta_t, step=True)
# Store the results
if particle.K_T == 0:
# Make the state-space heat correct
Ta = profile.get_values(r.y[2], 'temperature')
r.y[-1] = np.sum(r.y[3:-1]) * particle.cp * Ta
for i in range(len(r.y[3:-1])):
if r.y[i+3] < 0.:
# Concentration should not overshoot zero
r.y[i+3] = 0.
t.append(r.t)
y.append(r.y)
k += 1
# Evaluate stop criteria
if r.successful():
# Check if bubble dissolved (us = 0 or based on fdis) or reached
# the free surface
us = - (y[-2][2] - y[-1][2]) / (t[-2] - t[-1])
if r.y[2] <= profile.z_min or us <= 0. or f < particle.fdis:
stop = True
if k > 300000:
stop = True
if t[-1] > 1209600:
# Particle has reached 14 days of simulation
stop = True
# Remove any negative depths due to overshooting the free surface
t = np.array(t)
y = np.array(y)
rows = y[:,2] >= 0
t = t[rows]
y = y[rows,:]
# Return the solution
print(' Depth: %g (m), t: %g (s), k: %d' %
(y[-1,2], t[-1], k))
return (t, y)
def derivs(t, y, profile, particle, p):
"""
Compute the RHS of the ODE for the trajectory of a single particle
Compute the right-hand-side of the governing system of ordinary
differential equations for the trajectory of a single particle rising
through the water column.
Parameters
----------
t : float
Current simulation time (s)
y : ndarray
        Model state space. Includes the current position (m), the masses (kg)
of each component of the particle, and the particle heat content
(J)
profile : `ambient.Profile` object
Ambient CTD data for the model simulation
particle : `LagrangianParticle` object
Object describing the properties and behavior of the particle.
p : `ModelParams` object
Object containing the model parameters
Notes
-----
This function is called by the ODE solver `scipy.integrate.ode`. This
function should not generally be called by the user.
"""
# Set up the output variable
yp = np.zeros(y.shape)
# Extract the state space variables for speed and ease of reading code
z = y[2]
m = y[3:-1]
T = y[-1] / (np.sum(m) * particle.cp)
# Get the ambient profile data
Ta, Sa, P = profile.get_values(z, ['temperature', 'salinity', 'pressure'])
ua, va, wa = profile.get_values(z, ['ua', 'va', 'wa'])
C = profile.get_values(z, particle.composition)
# Get the physical particle properties
(us, rho_p, A, Cs, beta, beta_T, T) = particle.properties(m, T, P, Sa,
Ta, t)
# Get the biodegradation rate constants
k_bio = particle.biodegradation_rate(t)
# Advection
yp[0] = ua
yp[1] = va
yp[2] = -us - wa
# Dissolution
if len(Cs) > 0:
md_diss = - A * beta[:] * (Cs[:] - C[:])
else:
md_diss = np.array([0.])
# Biodegradation
md_biodeg = -k_bio * m
yp[3:-1] = md_diss + md_biodeg
# Account for heat transfer (ignore heat of solution since it is
# negligible in the beginning as the particle approaches equilibrium)
yp[-1] = - rho_p * particle.cp * A * beta_T * (T - Ta)
# Account for heat lost due to decrease in mass
yp[-1] += particle.cp * np.sum(md_diss + md_biodeg) * T
# Return the derivatives
return yp
def sbm_ic(profile, particle, X0, de, yk, T0, K, K_T, fdis, t_hyd, lag_time):
"""
Set the initial conditions for a single bubble model simulation
Set up the state space at the release point for the single bubble model
simulation
Parameters
----------
profile : `ambient.Profile` object
Ambient CTD data for the model simulation
particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
Object describing the properties and behavior of the particle.
X0 : ndarray
        The release location (x, y, z) in m of the particle in the simulation
de : float
Initial diameter of the particle (m)
yk : ndarray
Initial mole fractions of each component in the particle (--)
T0 : float, optional
Initial temperature (K) of the particle at release if not equal
to the temperature of the surrounding fluid. If omitted, the
model will set T0 to the ambient temperature.
K : float
Mass transfer reduction factor (--). Pre-multiplies the mass
transfer coefficients providing amplification (>1) or retardation
(<1) of the dissolution.
K_T : float
Heat transfer reduction factor (--). Pre-multiplies the heat
transfer coefficient providing amplification (>1) or retardation
(<1) of the heat flux.
fdis : float
Fraction of the initial total mass (--) remaining when the
particle should be considered dissolved.
    t_hyd : float
        Hydrate film formation time (s). Mass transfer is computed by clean
        bubble methods for t less than t_hyd and by dirty bubble methods
        thereafter. The default behavior is to assume the particle is dirty
        or hydrate covered from the release.
    lag_time : bool
        Flag indicating whether the biodegradation rates should include
        a lag time.
Returns
-------
particle : `LagrangianParticle` object
A `LagrangianParticle` object containing a unified interface to the
`dbm` module and the particle-specific model parameters (e.g., mass
transfer reduction factor, etc.)
y0 : ndarray
        Model state space at the release point. Includes the current position
(m), the masses (kg) of each component of the particle, and the
particle heat content (J)
Notes
-----
This function converts an initial diameter and a list of mole fractions
    to the actual mass of each component in a particle. This seems to be
    the most common way a single particle would be initialized. Note,
    however, that the user does not specify the mass: it is calculated in
    this function. If the same diameter particle is released at a deeper
    depth, it will contain more mass (due to compressibility). Likewise,
if the composition is changed while the depth and diameter are
maintained constant, the mass will change, altering the trajectory
and simulation results. If the mass is to be kept constant, this must
be done outside this routine and the correct diameter calculated and
passed to this function.
"""
# Get the particle initial conditions from the dispersed_phases module
m0, T0, nb0, P, Sa, Ta = dispersed_phases.initial_conditions(profile,
X0[2], particle, yk, None, 0, de, T0)
# Initialize a LagrangianParticle object
particle = dispersed_phases.SingleParticle(particle, m0, T0, K, K_T,
fdis, t_hyd, lag_time)
# Assemble the state space
y0 = np.hstack((X0, m0, T0 * np.sum(m0) * particle.cp))
# Return the particle object and the state space
return (particle, y0)
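# A hedged end-to-end sketch tying sbm_ic to calculate_path (object names
# are hypothetical; `ctd` is an `ambient.Profile` and `bub` a
# `dbm.FluidParticle` created elsewhere):
#
# >>> X0 = np.array([0., 0., 1000.])    # release 1000 m below the surface
# >>> particle, y0 = sbm_ic(ctd, bub, X0, de=0.005, yk=np.array([1.]),
# ...                       T0=None, K=1., K_T=1., fdis=1.e-8, t_hyd=0.,
# ...                       lag_time=True)
# >>> t, y = calculate_path(ctd, particle, ModelParams(ctd), y0, delta_t=0.1)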
# ----------------------------------------------------------------------------
# Functions to post process the simulation solution
# ----------------------------------------------------------------------------
def plot_state_space(profile, particle, p, t, y, fig):
"""
Create the basic plots to interrogate the solution for the particle path
Plots the basic state space variables for a solution of the particle
trajectory.
Parameters
----------
profile : `ambient.Profile` object
Ambient CTD data for the model simulation
particle : `LagrangianParticle` object
Object describing the properties and behavior of the particle.
p : `ModelParams` object
Collection of model parameters passed to `derivs`.
t : ndarray
Times (s) associated with the state space for the trajectory of the
particle
y : ndarray
State space along the trajectory of the particle. The state space
includes the location (m), masses (kg) of the particle components, and
the particle heat content (J). Each variable is contained in a
separate column of `y`.
fig : int
Figure number to place the first of the plots.
Notes
-----
Creates three figure windows:
1. State space variables versus time
2. Particle diameter, shape, density, and temperature
3. Solubility, mass transfer, and surface area
"""
    # Imported here so matplotlib is only loaded when plotting is requested.
import matplotlib.pyplot as plt
import matplotlib as mpl
# Extract the state space variables
xi = y[:,0]
yi = y[:,1]
zi = y[:,2]
m = y[:,3:-1]
T = np.array([y[i,-1] / (np.sum(m[i,:]) * particle.cp)
for i in range(len(zi))])
# Compute the diameter and save the ambient temperature
rho_p = np.zeros(t.shape)
A = np.zeros(t.shape)
Cs = np.zeros((t.shape[0], len(particle.composition)))
beta = np.zeros((t.shape[0], len(particle.composition)))
Ta = np.zeros(t.shape)
shape = np.zeros(t.shape)
de = np.zeros(t.shape)
us = np.zeros(t.shape)
P = np.zeros(t.shape)
Sa = np.zeros(t.shape)
N = np.zeros(t.shape)
T_fun = np.zeros(t.shape)
for i in range(len(t)):
Ta[i], Sa[i], P[i] = profile.get_values(zi[i], ['temperature',
'salinity', 'pressure'])
N[i] = profile.buoyancy_frequency(zi[i], h=0.005)
(us[i], rho_p[i], A[i], Cs_local, beta_local, beta_T, T_fun[i]) = \
particle.properties(m[i,:], T[i], P[i], Sa[i], Ta[i], t[i])
if len(Cs_local) > 0:
Cs[i,:] = Cs_local
beta[i,:] = beta_local
shape[i] = particle.particle.particle_shape(m[i,:], T[i], P[i],
Sa[i], Ta[i])[0]
de[i] = particle.diameter(m[i,:], T[i], P[i], Sa[i], Ta[i])
# Start by plotting the raw state space versus t
plt.figure(fig)
plt.clf()
plt.show()
# Depth
ax1 = plt.subplot(221)
ax1.plot(zi, t)
ax1.set_xlabel('Depth (m)')
ax1.set_ylabel('Time (s)')
ax1.locator_params(tight=True, nbins=6)
ax1.grid(True)
# Slip Velocity
ax2 = plt.subplot(222)
ax2.plot(us, t)
ax2.set_xlabel('Slip velocity (m/s)')
ax2.locator_params(tight=True, nbins=6)
ax2.grid(True)
# Masses
ax3 = plt.subplot(223)
ax3.semilogx(m, t)
ax3.set_xlabel('Component masses (kg)')
ax3.locator_params(axis='y', tight=True, nbins=6)
#ax3.xaxis.set_major_locator(mpl.ticker.LogLocator(base=1e2))
ax3.grid(True)
# Heat
ax4 = plt.subplot(224)
ax4.semilogx(y[:,-1], t)
ax4.set_xlabel('Heat (J)')
ax4.locator_params(axis='y', tight=True, nbins=6)
#ax4.xaxis.set_major_locator(mpl.ticker.LogLocator(base=1e2))
ax4.grid(True)
plt.draw()
# Plot derived variables related to diameter
plt.figure(fig+1)
plt.clf()
plt.show()
# Diameter
ax1 = plt.subplot(221)
ax1.semilogx(de * 1000, zi)
ax1.set_xlabel('Diameter (mm)')
ax1.set_ylabel('Depth (m)')
ax1.locator_params(axis='y', tight=True, nbins=6)
#ax1.xaxis.set_major_locator(mpl.ticker.LogLocator(base=1e2))
ax1.invert_yaxis()
ax1.grid(True)
# Shape
ax2 = plt.subplot(222)
ax2.plot(shape, zi)
ax2.set_xlabel('Shape (--)')
ax2.set_xlim((0, 4))
ax2.invert_yaxis()
ax2.grid(which='major', axis='x')
ax2.locator_params(tight=True, nbins=4)
ax2.grid(True)
# Density
ax3 = plt.subplot(223)
ax3.plot(rho_p, zi)
    ax3.set_xlabel('Density (kg/m^3)')
ax3.set_ylabel('Depth (m)')
ax3.invert_yaxis()
ax3.locator_params(tight=True, nbins=6)
ax3.grid(True)
# Temperature
ax4 = plt.subplot(224)
ax4.plot(T, zi)
ax4.plot(T_fun, zi)
ax4.plot(Ta, zi)
ax4.set_xlabel('Temperature (K)')
ax4.invert_yaxis()
ax4.locator_params(tight=True, nbins=6)
ax4.grid(True)
plt.draw()
# Plot dissolution data
plt.figure(fig+2)
plt.clf()
plt.show()
# Masses
ax1 = plt.subplot(221)
ax1.semilogx(m, zi)
ax1.set_xlabel('Component masses (kg)')
ax1.set_ylabel('Depth (m)')
ax1.locator_params(axis='y', tight=True, nbins=6)
#ax1.xaxis.set_major_locator(mpl.ticker.LogLocator(base=1e2))
ax1.invert_yaxis()
ax1.grid(True)
# Solubility
ax2 = plt.subplot(222)
ax2.plot(Cs, zi)
ax2.set_xlabel('Solubility (kg/m^3)')
ax2.locator_params(tight=True, nbins=6)
ax2.invert_yaxis()
ax2.grid(True)
# Mass transfer coefficient
ax3 = plt.subplot(223)
ax3.plot(beta, zi)
ax3.set_xlabel('Mass transfer (m/s)')
ax3.invert_yaxis()
ax3.locator_params(tight=True, nbins=6)
ax3.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax3.grid(True)
# Area
ax4 = plt.subplot(224)
ax4.semilogx(A, zi)
ax4.set_xlabel('Surface area (m^2)')
ax4.locator_params(axis='y', tight=True, nbins=6)
#ax4.xaxis.set_major_locator(mpl.ticker.LogLocator(base=1e2))
ax4.invert_yaxis()
ax4.grid(True)
plt.draw()
# Plot dissolution data
plt.figure(fig+3)
plt.clf()
plt.show()
# CTD Temperature
ax1 = plt.subplot(221)
ax1.plot(Ta - 273.15, zi)
ax1.set_xlabel('Temperature (deg C)')
ax1.set_ylabel('Depth (m)')
ax1.locator_params(tight=True, nbins=6)
ax1.invert_yaxis()
ax1.grid(True)
ax2 = plt.subplot(222)
ax2.plot(Sa, zi)
ax2.set_xlabel('Salinity (psu)')
ax2.locator_params(tight=True, nbins=6)
ax2.invert_yaxis()
ax2.grid(True)
ax3 = plt.subplot(223)
ax3.plot(P, zi)
ax3.set_xlabel('Pressure (Pa)')
ax3.set_ylabel('Depth (m)')
ax3.locator_params(tight=True, nbins=6)
ax3.invert_yaxis()
ax3.grid(True)
    ax4 = plt.subplot(224)
ax4.plot(N, zi)
ax4.set_xlabel('Buoyancy Frequency (1/s)')
ax4.locator_params(tight=True, nbins=6)
ax4.invert_yaxis()
ax4.grid(True)
plt.draw()
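# Plotting sketch (assumes `ctd`, `particle`, `p`, `t`, and `y` come from the
# simulation workflow above):
#
# >>> plot_state_space(ctd, particle, p, t, y, fig=1)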
|
socolofs/tamoc
|
tamoc/single_bubble_model.py
|
Python
|
mit
| 38,945
|
[
"NetCDF"
] |
f4d5a1025926077c41b06f4a86dc0b22067feb3c598777a528fe60cb03e3837d
|
# coding: utf-8
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input, to generate an
entire directory of vasp input files for running.
"""
from __future__ import unicode_literals
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
import warnings
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
class StandardTransmuter(object):
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
extend_collection=0, ncores=None):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans,
extend_collection=extend_collection)
def get_transformed_structures(self):
"""
Returns all TransformedStructures.
.. deprecated:: v2.1.0
Use transformed_structures attribute instead. Will be removed in
next version.
"""
warnings.warn("Use transformed_structures attribute instead.",
DeprecationWarning)
return self.transformed_structures
def __getitem__(self, index):
return self.transformed_structures[index]
def __getattr__(self, name):
return [getattr(x, name) for x in self.transformed_structures]
def undo_last_change(self):
"""
Undo the last transformation in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
for x in self.transformed_structures:
x.undo_last_change()
def redo_next_change(self):
"""
Redo the last undone transformation in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
for x in self.transformed_structures:
x.redo_next_change()
def __len__(self):
return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False,
clear_redo=True):
"""
Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
List of booleans corresponding to initial transformed structures
each boolean describes whether the transformation altered the
structure
"""
if self.ncores and transformation.use_multiprocessing:
p = Pool(self.ncores)
#need to condense arguments into single tuple to use map
z = map(
lambda x: (x, transformation, extend_collection, clear_redo),
self.transformed_structures)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation,
extend_collection,
clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
def write_vasp_input(self, vasp_input_set, output_dir,
create_directory=True, subfolder=None,
include_cif=False):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to create
vasp input files from structures
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Callable to create subdirectory name from
transformed_structure. e.g.,
lambda x: x.other_parameters["tags"][0] to use the first tag.
include_cif (bool): Whether to output a CIF as well. CIF files
are generally better supported in visualization programs.
"""
batch_write_vasp_input(self.transformed_structures, vasp_input_set,
output_dir, create_directory, subfolder,
include_cif)
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
"""
        Overloaded to accept either a list of transformed structures
        or a transmuter, in which case it appends the second transmuter's
structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter.
"""
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter
.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
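# A hedged usage sketch (assumes `structures` is a list of pymatgen Structure
# objects created elsewhere; SupercellTransformation is used purely as an
# illustrative transformation):
#
# >>> from pymatgen.transformations.standard_transformations import \
# ...     SupercellTransformation
# >>> transmuter = StandardTransmuter.from_structures(
# ...     structures, transformations=[SupercellTransformation()])
# >>> transmuter.add_tags(["Project X"])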
class CifTransmuter(StandardTransmuter):
"""
Generates a Transmuter from a cif string, possibly containing multiple
structures.
"""
def __init__(self, cif_string, transformations=None, primitive=True,
extend_collection=False):
"""
Generates a Transmuter from a cif string, possibly
containing multiple structures.
Args:
cif_string: A string containing a cif or a series of cifs
transformations: New transformations to be applied to all
structures
primitive: Whether to generate the primitive cell from the cif.
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
"""
transformed_structures = []
lines = cif_string.split("\n")
structure_data = []
read_data = False
for line in lines:
            if re.match(r"^\s*data", line):
structure_data.append([])
read_data = True
if read_data:
structure_data[-1].append(line)
for data in structure_data:
tstruct = TransformedStructure.from_cif_string("\n".join(data), [],
primitive)
transformed_structures.append(tstruct)
StandardTransmuter.__init__(self, transformed_structures,
transformations, extend_collection)
@staticmethod
def from_filenames(filenames, transformations=None, primitive=True,
extend_collection=False):
"""
Generates a TransformedStructureCollection from a cif, possibly
containing multiple structures.
Args:
filenames: List of strings of the cif files
transformations: New transformations to be applied to all
structures
primitive: Same meaning as in __init__.
extend_collection: Same meaning as in __init__.
"""
allcifs = []
for fname in filenames:
with open(fname, "r") as f:
allcifs.append(f.read())
return CifTransmuter("\n".join(allcifs), transformations,
primitive=primitive,
extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
"""
Generates a transmuter from a sequence of POSCARs.
Args:
        poscar_string: A POSCAR string
transformations: New transformations to be applied to all
structures.
extend_collection: Whether to use more than one output structure
from one-to-many transformations.
"""
def __init__(self, poscar_string, transformations=None,
extend_collection=False):
tstruct = TransformedStructure.from_poscar_string(poscar_string, [])
StandardTransmuter.__init__(self, [tstruct], transformations,
extend_collection=extend_collection)
@staticmethod
def from_filenames(poscar_filenames, transformations=None,
extend_collection=False):
"""
        Convenience constructor to generate a POSCAR transmuter from a list of
POSCAR filenames.
Args:
poscar_filenames: List of POSCAR filenames
transformations: New transformations to be applied to all
structures.
extend_collection:
Same meaning as in __init__.
"""
tstructs = []
for filename in poscar_filenames:
with open(filename, "r") as f:
tstructs.append(TransformedStructure
.from_poscar_string(f.read(), []))
return StandardTransmuter(tstructs, transformations,
extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set, output_dir,
create_directory=True, subfolder=None,
include_cif=False):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to create
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
        include_cif (bool): Whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
"""
for i, s in enumerate(transformed_structures):
        formula = re.sub(r"\s+", "", s.final_structure.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir,
"{}_{}".format(formula, i))
else:
dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
s.write_vasp_input(vasp_input_set, dirname,
create_directory=create_directory)
if include_cif:
from pymatgen.io.cifio import CifWriter
writer = CifWriter(s.final_structure)
writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
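# Layout sketch: a structure with formula "Fe2 O3" at index 0 and a
# hypothetical subfolder=lambda x: "proj" would be written to
# output_dir/proj/Fe2O3_0/, plus output_dir/proj/Fe2O3_0/Fe2O3.cif when
# include_cif=True.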
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
            collection, and a boolean indicating whether to clear the redo list
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
|
yanikou19/pymatgen
|
pymatgen/alchemy/transmuters.py
|
Python
|
mit
| 16,770
|
[
"VASP",
"pymatgen"
] |
78fe0c774415b6c93d2381246c59d010971eca2e7c49abcd31331213df6dcf22
|
"""
Does the following:
1. Generates and saves random secret key
2. Removes the taskapp if celery isn't going to be used
3. Removes the .idea directory if PyCharm isn't going to be used
4. Copy files from /docs/ to {{ cookiecutter.project_slug }}/docs/
TODO: this might have to be moved to a pre_gen_hook
A portion of this code was adopted from Django's standard crypto functions and
utilities, specifically:
https://github.com/django/django/blob/master/django/utils/crypto.py
"""
from __future__ import print_function
import os
import random
import shutil
# Get the root project directory
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
# Use the system PRNG if possible
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
using_sysrandom = False
def get_random_string(
length=50,
allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'):
"""
Returns a securely generated random string.
    The default length of 50 with the 49-character set above returns
    a ~281-bit value: log_2(49^50) =~ 281 bits.
"""
if using_sysrandom:
return ''.join(random.choice(allowed_chars) for i in range(length))
print(
"Cookiecutter Django couldn't find a secure pseudo-random number generator on your system."
" Please change change your SECRET_KEY variables in conf/settings/local.py and env.example"
" manually."
)
return "CHANGEME!!"
def set_secret_key(setting_file_location):
    # Read the current settings file
with open(setting_file_location) as f:
file_ = f.read()
# Generate a SECRET_KEY that matches the Django standard
SECRET_KEY = get_random_string()
# Replace "CHANGEME!!!" with SECRET_KEY
file_ = file_.replace('CHANGEME!!!', SECRET_KEY, 1)
    # Write the results back to the settings file
with open(setting_file_location, 'w') as f:
f.write(file_)
def make_secret_key(project_directory):
"""Generates and saves random secret key"""
# Determine the local_setting_file_location
local_setting = os.path.join(
project_directory,
'config/settings/local.py'
)
# local.py settings file
set_secret_key(local_setting)
env_file = os.path.join(
project_directory,
'env.example'
)
# env.example file
set_secret_key(env_file)
def remove_file(file_name):
if os.path.exists(file_name):
os.remove(file_name)
def remove_task_app(project_directory):
"""Removes the taskapp if celery isn't going to be used"""
    # Determine the task app location
    task_app_location = os.path.join(
        project_directory,
'{{ cookiecutter.project_slug }}/taskapp'
)
shutil.rmtree(task_app_location)
def remove_pycharm_dir(project_directory):
"""
Removes directories related to PyCharm
if it isn't going to be used
"""
    idea_dir_location = os.path.join(project_directory, '.idea/')
if os.path.exists(idea_dir_location):
shutil.rmtree(idea_dir_location)
    docs_dir_location = os.path.join(project_directory, 'docs/pycharm/')
if os.path.exists(docs_dir_location):
shutil.rmtree(docs_dir_location)
def remove_heroku_files():
"""
Removes files needed for heroku if it isn't going to be used
"""
for filename in ["app.json", "Procfile", "requirements.txt", "runtime.txt"]:
file_name = os.path.join(PROJECT_DIRECTORY, filename)
remove_file(file_name)
def remove_docker_files():
"""
Removes files needed for docker if it isn't going to be used
"""
for filename in ["dev.yml", "docker-compose.yml", ".dockerignore"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
shutil.rmtree(os.path.join(
PROJECT_DIRECTORY, "compose"
))
def remove_grunt_files():
"""
Removes files needed for grunt if it isn't going to be used
"""
for filename in ["Gruntfile.js"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_gulp_files():
"""
    Removes files needed for gulp if it isn't going to be used
"""
for filename in ["gulpfile.js"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_packageJSON_file():
"""
    Removes package.json if no JS task runner is going to be used
"""
for filename in ["package.json"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_certbot_files():
"""
Removes files needed for certbot if it isn't going to be used
"""
nginx_dir_location = os.path.join(PROJECT_DIRECTORY, 'compose/nginx')
for filename in ["nginx-secure.conf", "start.sh", "dhparams.example.pem"]:
file_name = os.path.join(nginx_dir_location, filename)
remove_file(file_name)
# IN PROGRESS
# def copy_doc_files(project_directory):
# cookiecutters_dir = DEFAULT_CONFIG['cookiecutters_dir']
# cookiecutter_django_dir = os.path.join(
# cookiecutters_dir,
# 'cookiecutter-django',
# 'docs'
# )
# target_dir = os.path.join(
# project_directory,
# 'docs'
# )
# for name in os.listdir(cookiecutter_django_dir):
# if name.endswith('.rst') and not name.startswith('index'):
# src = os.path.join(cookiecutter_django_dir, name)
# dst = os.path.join(target_dir, name)
# shutil.copyfile(src, dst)
# 1. Generates and saves random secret key
make_secret_key(PROJECT_DIRECTORY)
# 2. Removes the taskapp if celery isn't going to be used
if '{{ cookiecutter.use_celery }}'.lower() == 'n':
remove_task_app(PROJECT_DIRECTORY)
# 3. Removes the .idea directory if PyCharm isn't going to be used
if '{{ cookiecutter.use_pycharm }}'.lower() != 'y':
remove_pycharm_dir(PROJECT_DIRECTORY)
# 4. Removes all heroku files if it isn't going to be used
if '{{ cookiecutter.use_heroku }}'.lower() != 'y':
remove_heroku_files()
# 5. Removes all docker files if it isn't going to be used
if '{{ cookiecutter.use_docker }}'.lower() != 'y':
remove_docker_files()
# 6. Removes all JS task manager files if it isn't going to be used
if '{{ cookiecutter.js_task_runner}}'.lower() == 'gulp':
remove_grunt_files()
elif '{{ cookiecutter.js_task_runner}}'.lower() == 'grunt':
remove_gulp_files()
else:
remove_gulp_files()
remove_grunt_files()
remove_packageJSON_file()
# 7. Removes all certbot/letsencrypt files if it isn't going to be used
if '{{ cookiecutter.use_lets_encrypt }}'.lower() != 'y':
remove_certbot_files()
# 8. Display a warning if use_docker and a JS task runner are selected. JS task
# runners aren't supported by our docker config atm.
if '{{ cookiecutter.js_task_runner }}'.lower() in ['grunt', 'gulp'] and '{{ cookiecutter.use_docker }}'.lower() == 'y':
print(
"You selected to use docker and a JS task runner. This is NOT supported out of the box for now. You "
"can continue to use the project like you normally would, but you will need to add a "
"js task runner service to your docker configuration manually."
)
# 9. Removes the certbot/letsencrypt files and displays a warning if use_lets_encrypt is selected and use_docker isn't.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() != 'y':
remove_certbot_files()
print(
"You selected to use Let's Encrypt and didn't select to use docker. This is NOT supported out of the box for now. You "
"can continue to use the project like you normally would, but Let's Encrypt files have been included."
)
# 10. Directs the user to the documentation if certbot and docker are selected.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() == 'y':
print(
"You selected to use Let's Encrypt, please see the documentation for instructions on how to use this in production. "
"You must generate a dhparams.pem file before running docker-compose in a production environment."
)
# 4. Copy files from /docs/ to {{ cookiecutter.project_slug }}/docs/
# copy_doc_files(PROJECT_DIRECTORY)
|
ingenioustechie/cookiecutter-django-openshift
|
hooks/post_gen_project.py
|
Python
|
mit
| 8,246
|
[
"GULP"
] |
cfd09cea9e2666ae1e9c000421709ff9c3b211a22ff8becc61cc455552ba4124
|
#####################################################################
# Ryuretic: A Modular Framework for RYU #
# !/ryu/ryu/app/Ryuretic/Pkt_Parse13.py #
# author: Jacob Cox
# Pkt_Parse13.py
# date 7 February 2016
#####################################################################
# Copyright (C) 2016 Jacob Cox - All Rights Reserved #
# You may use, distribute and modify this code under the #
# terms of the Ryuretic license, provided this work is cited #
# in the work for which it is used. #
# For latest updates, please visit: #
# https://github.gatech.edu/jcox70/RyureticLabs #
#####################################################################
"""How To Run This Program
1) Ensure you have Ryu installed.
2) Save the following files to /home/ubuntu/ryu/ryu/app/Ryuretic
a) coupler.py
b) NFGRD.py
c) Pkt_Parse13.py
d) switch_mod13.py
3) In your controller terminal type: cd ryu
4) Enter PYTHONPATH=. ./bin/ryu-manager ryu/app/Ryuretic/Ryuretic_Intf.py
"""
###################################################
import logging
import struct
# Standard RYU calls
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, lldp, arp, ipv4, icmp, tcp, udp, dhcp, bpdu
# Needed for Ryuretic framework
import time
class Pkt_Parse(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(Pkt_Parse, self).__init__(*args, **kwargs)
        self.pktinfo = {}
#Not yet set up to handle IPv6
def handle_pkt(self, ev):
pkt = {}
        pkt['t_in'] = int(round(time.time() * 1000))  # arrival time (ms)
pkt['msg'] = ev.msg
pkt['dp'] = pkt['msg'].datapath
pkt['ofproto'] = pkt['msg'].datapath.ofproto
parser = pkt['dp'].ofproto_parser
pkt['inport']= pkt['msg'].match['in_port']
pkt['pkt'] = packet.Packet(pkt['msg'].data)
pkt['eth'] = pkt['pkt'].get_protocols(ethernet.ethernet)[0]
ether = pkt['pkt'].get_protocols(ethernet.ethernet)[0]
#print 'Parser Ether: ', ether
pkt['srcmac']= ether.src
pkt['dstmac']= ether.dst
pkt['ethtype'] = ether.ethertype
arp_p = pkt['arp'] = pkt['pkt'].get_protocol(arp.arp)
#print 'ARP: ', arp_p
if arp_p != None:
#pkt['arp'] = True
pkt['srcmac'] = arp_p.src_mac
pkt['dstmac'] = arp_p.dst_mac
pkt['srcip'] = arp_p.src_ip
pkt['dstip'] = arp_p.dst_ip
pkt['hlen'] = arp_p.hlen
pkt['plen'] = arp_p.plen
pkt['opcode'] = arp_p.opcode
pkt['proto'] = arp_p.proto
pkt['hwtype'] = arp_p.hwtype
ip = pkt['ip'] = pkt['pkt'].get_protocol(ipv4.ipv4)
#print 'IPv4: ', pkt['pkt'].get_protocol(ipv4.ipv4)
if ip != None:
#print ip
#pkt['ip'] = True
pkt['srcip'] = ip.src
pkt['dstip'] = ip.dst
pkt['ttl'] = ip.ttl
pkt['id'] = ip.identification
pkt['ver'] = ip.version
pkt['flags'] = ip.flags
pkt['hlen'] = ip.header_length
pkt['offset'] = ip.offset
pkt['opt'] = ip.option
pkt['proto'] = ip.proto
pkt['tos'] = ip.tos
pkt['csum'] = ip.csum
icmp_p = pkt['icmp'] = pkt['pkt'].get_protocol(icmp.icmp)
#print 'ICMP: ', icmp_p
if icmp_p != None:
#pkt['icmp'] = True
pkt['code'] = icmp_p.code
pkt['csum'] = icmp_p.csum
pkt['data'] = icmp_p.data
pkt['type'] = icmp_p.type
tcp_p = pkt['tcp'] = pkt['pkt'].get_protocol(tcp.tcp)
#print tcp_p
if tcp_p != None:
#print pkt['tcp']
#pkt['tcp'] = True
pkt['ack']=tcp_p.ack
pkt['csum'] = tcp_p.csum
pkt['dstport'] = tcp_p.dst_port
pkt['offset'] = tcp_p.offset
pkt['option'] = tcp_p.option
pkt['seq'] = tcp_p.seq
pkt['srcport'] = tcp_p.src_port
            pkt['urgent'] = tcp_p.urgent
pkt['winsize'] = tcp_p.window_size
pkt['bits'] = tcp_p.bits
udp_p = pkt['udp']= pkt['pkt'].get_protocol(udp.udp)
#print udp_p
if udp_p != None:
#pkt['udp'] = True
pkt['csum'] = udp_p.csum
pkt['dstport'] = udp_p.dst_port
pkt['srcport'] = udp_p.src_port
pkt['t_length'] = udp_p.total_length
pkt['dhcp']= pkt['pkt'].get_protocol(dhcp.dhcp)
pkt['bpdu']= pkt['pkt'].get_protocol(bpdu.bpdu)
pkt['lldp']= pkt['pkt'].get_protocol(lldp.lldp)
return pkt
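# A hedged usage sketch from a Ryuretic coupler (handler name is
# hypothetical; `ev` is the packet-in event RYU delivers at MAIN_DISPATCHER):
#
# >>> parser = Pkt_Parse()
# >>> pkt = parser.handle_pkt(ev)
# >>> pkt['srcmac'], pkt['dstmac'], pkt['inport']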
|
Ryuretic/RAP
|
ryu/ryu/app/Ryuretic/Pkt_Parse13.py
|
Python
|
apache-2.0
| 5,031
|
[
"VisIt"
] |
d699a00768b74d29847eb93313149ed384cf8252ffe4cb79720c73652db5d6ba
|
#!/usr/bin/env python
###############################################################################
# Name: setup.py #
# Purpose: Setup/build script for Editra #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Editra Setup Script
USAGE:
1) Windows:
- python setup.py py2exe
2) MacOSX:
- python setup.py py2app
3) Boil an Egg
- python setup.py bdist_egg
4) Install as a python package
- python setup.py install
- '--no-clean' can be specified to skip old file cleanup
@summary: Used for building the editra distribution files and installations
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: setup.py 63813 2010-03-31 00:05:50Z CJP $"
__revision__ = "$Revision: 63813 $"
#---- Imports ----#
import os
import sys
import glob
import shutil
import zipfile
import time
import src.info as info
import src.syntax.synextreg as synextreg # So we can get file extensions
# Version Check(s)
if sys.version_info < (2, 5):
sys.stderr.write("[ERROR] Not a supported Python version. Need 2.5+\n")
sys.exit(1)
try:
import wx
except ImportError:
if 'bdist_egg' not in sys.argv:
sys.stderr.write("[ERROR] wxPython2.8 is required.\n")
sys.exit(1)
else:
if wx.VERSION < (2, 8, 8):
sys.stderr.write("[ERROR] wxPython 2.8.8+ is required.\n")
sys.exit(1)
#---- System Platform ----#
__platform__ = os.sys.platform
#---- Global Settings ----#
APP = ['src/Editra.py']
AUTHOR = "Cody Precord"
AUTHOR_EMAIL = "staff@editra.org"
YEAR = 2008
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved',
'Natural Language :: English',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: Croatian',
'Natural Language :: Czech',
'Natural Language :: Danish',
'Natural Language :: Dutch',
'Natural Language :: French',
'Natural Language :: Hungarian',
'Natural Language :: German',
'Natural Language :: Italian',
'Natural Language :: Latvian',
'Natural Language :: Japanese',
'Natural Language :: Norwegian',
'Natural Language :: Polish',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Romanian',
'Natural Language :: Russian',
'Natural Language :: Serbian',
'Natural Language :: Slovak',
'Natural Language :: Slovenian',
'Natural Language :: Spanish',
'Natural Language :: Swedish',
'Natural Language :: Turkish',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Text Editors'
]
def GenerateBinPackageFiles():
"""Generate the list of files needed for py2exe/py2app package files"""
data = [("include/python2.5",
glob.glob("include/python2.5/%s/*" % __platform__)),
("pixmaps/theme/Default", ["pixmaps/theme/Default/README"]),
("pixmaps/theme/Tango",["pixmaps/theme/Tango/AUTHORS",
"pixmaps/theme/Tango/COPYING"]),
("pixmaps/theme/Tango/toolbar",
glob.glob("pixmaps/theme/Tango/toolbar/*.png")),
("pixmaps/theme/Tango/menu",
glob.glob("pixmaps/theme/Tango/menu/*.png")),
("pixmaps/theme/Tango/mime",
glob.glob("pixmaps/theme/Tango/mime/*.png")),
("pixmaps/theme/Tango/other",
glob.glob("pixmaps/theme/Tango/other/*.png")),
("styles", glob.glob("styles/*.ess")),
("ekeys", glob.glob("ekeys/*.ekeys")),
("tests/syntax", glob.glob("tests/syntax/*")),
("docs", glob.glob("docs/*.txt")), "AUTHORS", "FAQ", "INSTALL",
"README","CHANGELOG","COPYING", "NEWS", "THANKS", "TODO",
"setup.cfg"
]
# Get the locale files
for loc_dir in os.listdir("locale"):
tmp = "locale/" + loc_dir + "/LC_MESSAGES"
if os.path.isdir(tmp):
tmp2 = tmp + "/Editra.mo"
if os.path.exists(tmp2):
data.append((tmp, [tmp2]))
# Only bundle the plugins for the running version of python being used for
# the build.
data.append(("plugins",
glob.glob("plugins/*py%d.%d.egg" % sys.version_info[:2])))
# Get platform specific icons
pixlist = ["pixmaps/editra.png", "pixmaps/editra_doc.png"]
if "darwin" in sys.platform:
data.append("pixmaps/editra_doc.icns")
pixlist.extend(["pixmaps/editra.icns", "pixmaps/editra_doc.icns"])
elif sys.platform.startswith("win"):
pixlist.append("pixmaps/editra.ico")
data.append(("pixmaps", pixlist))
return data
def GenerateSrcPackageFiles():
"""Generate the list of files to include in a source package dist/install"""
data = [ "src/*.py", "src/syntax/*.py", "src/autocomp/*.py",
"src/eclib/*.py", "docs/*.txt", "pixmaps/*.png", "pixmaps/*.ico",
"src/ebmlib/*.py",
"ekeys/*.ekeys",
"Editra",
"src/extern/*.py",
"src/extern/aui/*.py",
"src/extern/pygments/*.py",
"src/extern/pygments/formatters/*.py",
"src/extern/pygments/filters/*.py",
"src/extern/pygments/lexers/*.py",
"src/extern/pygments/styles/*.py",
"pixmaps/*.icns",
"pixmaps/theme/Default/README",
"pixmaps/theme/Tango/AUTHOR",
"pixmaps/theme/Tango/COPYING",
"pixmaps/theme/Tango/toolbar/*.png",
"pixmaps/theme/Tango/menu/*.png",
"pixmaps/theme/Tango/mime/*.png",
"pixmaps/theme/Default/README",
"pixmaps/theme/Tango/other/*.png",
"styles/*.ess", "tests/syntax/*",
"AUTHORS", "CHANGELOG","COPYING", "FAQ", "INSTALL", "NEWS",
"README", "THANKS", "TODO", "setup.cfg" ]
# Get the local files
for loc_dir in os.listdir("locale"):
tmp = "locale/" + loc_dir
if os.path.isdir(tmp):
tmp = tmp + "/LC_MESSAGES/Editra.mo"
if os.path.exists(tmp):
data.append(tmp)
# NOTE: plugins selected to package in build step
return data
DESCRIPTION = "Developer's Text Editor"
LONG_DESCRIPT = \
r"""
========
Overview
========
Editra is a multi-platform text editor with an implementation that focuses on
creating an easy to use interface and features that aid in code development.
Currently it supports syntax highlighting and a variety of other useful features
for over 60 programming languages. For a more complete list of features and
screenshots visit the projects homepage at `Editra.org
<http://www.editra.org/>`_.
============
Dependencies
============
* Python 2.4+
* wxPython 2.8.3+ (Unicode build suggested)
* setuptools 0.6+
"""
ICON = { 'Win' : "pixmaps/editra.ico",
'WinDoc' : "pixmaps/editra_doc.ico",
'Mac' : "pixmaps/Editra.icns"
}
# Explicitly include some libraries that are either loaded dynamically
# or otherwise not able to be found by py2app/exe
INCLUDES = ['syntax.*', 'ed_log', 'shutil', 'subprocess', 'zipfile',
'pygments.*', 'pygments.lexers.*', 'pygments.formatters.*',
'pygments.filters.*', 'pygments.styles.*', 'ftplib',
'extern.flatnotebook'] # temporary till all references can be removed
if sys.platform.startswith('win'):
INCLUDES.extend(['ctypes', 'ctypes.wintypes'])
else:
INCLUDES.extend(['pty', 'tty'])
LICENSE = "wxWindows"
NAME = "Editra"
URL = "http://editra.org"
VERSION = info.VERSION
MANIFEST_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
#---- End Global Settings ----#
#---- Packaging Functions ----#
def BuildPy2Exe():
"""Generate the Py2exe files"""
from distutils.core import setup
try:
import py2exe
except ImportError:
print "\n!! You dont have py2exe installed. !!\n"
exit()
# put package on path for py2exe
sys.path.append(os.path.abspath('src/'))
sys.path.append(os.path.abspath('src/extern'))
DATA_FILES = GenerateBinPackageFiles()
try:
import enchant
except ImportError:
pass
else:
from enchant import utils as enutil
DATA_FILES += enutil.win32_data_files()
setup(
name = NAME,
version = VERSION,
options = {"py2exe" : {"compressed" : 1,
"optimize" : 1,
"bundle_files" : 2,
"includes" : INCLUDES,
"excludes" : ["Tkinter",] }},
windows = [{"script": "src/Editra.py",
"icon_resources": [(0, ICON['Win'])],
"other_resources" : [(RT_MANIFEST, 1,
MANIFEST_TEMPLATE % dict(prog=NAME))],
}],
description = NAME,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = AUTHOR,
maintainer_email = AUTHOR_EMAIL,
license = LICENSE,
url = URL,
data_files = DATA_FILES,
)
def BuildOSXApp():
"""Build the OSX Applet"""
# Check for setuptools and ask to download if it is not available
import src.extern.ez_setup as ez_setup
ez_setup.use_setuptools()
from setuptools import setup
CleanBuild()
PLIST = dict(CFBundleName = info.PROG_NAME,
CFBundleIconFile = 'Editra.icns',
CFBundleShortVersionString = info.VERSION,
CFBundleGetInfoString = info.PROG_NAME + " " + info.VERSION,
CFBundleExecutable = info.PROG_NAME,
CFBundleIdentifier = "org.editra.%s" % info.PROG_NAME.title(),
CFBundleDocumentTypes = [dict(CFBundleTypeExtensions=synextreg.GetFileExtensions(),
CFBundleTypeIconFile='editra_doc',
CFBundleTypeRole="Editor"
),
],
CFBundleTypeMIMETypes = ['text/plain',],
CFBundleDevelopmentRegion = 'English',
# TODO Causes errors with the system menu translations and text rendering
# CFBundleLocalizations = ['English', 'Spanish', 'French', 'Japanese'],
# ['de_DE', 'en_US', 'es_ES', 'fr_FR',
# 'it_IT', 'ja_JP', 'nl_NL', 'nn_NO',
# 'pt_BR', 'ru_RU', 'sr_SR', 'tr_TR',
# 'uk_UA', 'zh_CN'],
# NSAppleScriptEnabled="YES",
NSHumanReadableCopyright = u"Copyright %s 2005-%d" % (AUTHOR, YEAR)
)
PY2APP_OPTS = dict(iconfile = ICON['Mac'],
argv_emulation = True,
optimize = True,
includes = INCLUDES,
plist = PLIST)
# Add extra mac specific files
DATA_FILES = GenerateBinPackageFiles()
DATA_FILES.append("scripts/editramac.sh")
# Put extern package on path for py2app
sys.path.append(os.path.abspath('src/extern'))
setup(
app = APP,
version = VERSION,
options = dict( py2app = PY2APP_OPTS),
description = DESCRIPTION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = AUTHOR,
maintainer_email = AUTHOR_EMAIL,
license = LICENSE,
url = URL,
data_files = DATA_FILES,
setup_requires = ['py2app'],
)
CreateDMG(VERSION)
def CreateDMG(version):
"""Create an OSX DMG
@param version: version number string
@todo: cleanup and generalize
"""
Log("Creating DMG for osx installer...")
assert os.path.exists('dist')
os.chdir('dist')
vname = "Editra-%s" % version
fname = vname + ".dmg"
mpath = "/Volumes/Editra-%s" % version
comp = "Editra-%s_2.dmg" % version
if os.path.exists("dist/%s" % fname):
Log("Found image from previous running")
os.remove("dist/%s" % fname)
# Create the temporary image
Log("Creating disk image...")
os.system("hdiutil create -size 75m -fs HFS+ -volname %s %s" % (vname, fname))
Log("Mounting disk image...")
os.system("hdiutil mount %s" % fname) # Mount the image
# Move installation files to the new image
Log("Copying installation files to installer image...")
if not os.path.exists(mpath + "/.bk"):
os.mkdir(mpath + "/.bk")
shutil.copy2("../pixmaps/installer/inst_bk.png", mpath + "/.bk/inst_bk.png")
os.system("ditto -rsrcFork Editra.app %s/Editra.app" % mpath)
Log("Configuring Finder View Options...")
# shutil.copy2("../scripts/installer/INSTALLER_DS_Store", mpath + "/.DS_Store")
# os.chmod(mpath + "/.DS_Store", 777)
f = open("tmpscript", 'w')
f.write(APPLE_SCRIPT % vname)
f.close()
status = os.system("osascript tmpscript")
os.remove("tmpscript")
Log("Applescript return status: %d" % status)
# Unmount the image
Log("Unmounting the installer image...")
os.system("hdiutil eject %s" % mpath)
# Create the compressed image
Log("Converting the disk image to a compressed format...")
os.system("hdiutil convert %s -format UDZO -imagekey zlib-level=9 -o %s" % (fname, comp))
# Cleanup
Log("Cleaning up temporary installer build files...")
os.remove(fname)
os.rename(comp, fname)
# Template for controlling some finder options via apple script
APPLE_SCRIPT = """
tell application "Finder"
tell disk ("%s" as string)
open
tell container window
set current view to icon view
set toolbar visible to false
set statusbar visible to false
set the bounds to {10, 60, 522, 402}
set statusbar visible to false
end tell
set opts to the icon view options of container window
tell opts
set icon size to 128
end tell
set background picture of opts to file ".bk:inst_bk.png"
set position of item "Editra.app" to {260, 145}
update without registering applications
end tell
end tell
"""
def DoSourcePackage():
"""Build a source package or do a source install"""
# Get the package data
DATA = GenerateSrcPackageFiles()
# Force optimization
    if 'install' in sys.argv and ('-O1' not in sys.argv and '-O2' not in sys.argv):
sys.argv.append('-O2')
# Install the plugins for this version of Python
DATA.append("plugins/*py%d.%d.egg" % sys.version_info[:2])
# Import proper setup function
if 'bdist_egg' in sys.argv:
try:
from setuptools import setup
# Only bundle eggs for the given python version
DATA.append("plugins/*py%d.%d.egg" % sys.version_info[:2])
except ImportError:
print "To build an egg setuptools must be installed"
else:
from distutils.core import setup
# Try to remove possibly conflicting files from an old install
if '--no-clean' not in sys.argv:
try:
import Editra
path = Editra.__file__
if '__init__' in path:
path = os.path.dirname(path)
path = os.path.join(path, 'src')
del sys.modules['Editra']
shutil.rmtree(path)
except (ImportError, OSError):
pass
        except Exception:
            sys.stderr.write("[ERROR] Failed to remove old source files\n")
else:
sys.argv.remove('--no-clean')
# Make sure to delete any existing MANIFEST file beforehand to
# prevent stale file lists
if os.path.exists('MANIFEST'):
try:
os.remove('MANIFEST')
except OSError:
pass
setup(
name = NAME,
scripts = ['Editra', 'Editra.pyw'],
version = VERSION,
description = DESCRIPTION,
long_description = LONG_DESCRIPT,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = AUTHOR,
maintainer_email = AUTHOR_EMAIL,
url = URL,
download_url = "http://editra.org/?page=download",
license = LICENSE,
platforms = [ "Many" ],
packages = [ NAME ],
package_dir = { NAME : '.' },
package_data = { NAME : DATA },
classifiers= CLASSIFIERS,
install_requires = ['wxPython',]
)
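# Invocation sketch: a plain source install appends '-O2' automatically
# unless an optimization flag was already given, e.g.:
#   python setup.py install
#   python setup.py install --no-clean   # keep files from a previous install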
def BuildECLibDemo():
"""Build the Editra Control Library Demo package"""
assert 'eclib' in sys.argv, "Should only be called for eclib build"
DATA = [ "../src/eclib/*.py", "../tests/controls/*.py"]
OUT = 'dist/eclibdemo'
Log("Cleaning up files")
if not os.path.exists('dist'):
os.mkdir('dist')
if os.path.exists('dist/eclibdemo.zip'):
os.remove('dist/eclibdemo.zip')
if os.path.exists(OUT):
shutil.rmtree(OUT)
# Copy the Files
Log("Preparing output package...")
os.mkdir(OUT)
shutil.copytree('src/eclib', 'dist/eclibdemo/eclib')
shutil.copytree('tests/controls', 'dist/eclibdemo/demo')
shutil.copy('COPYING', 'dist/eclibdemo/')
f = open(os.path.abspath('./dist/eclibdemo/__init__.py'), 'wb')
f.close()
# Make the launcher
f = open(os.path.abspath('./dist/eclibdemo/RunDemo.py'), 'wb')
f.write("import os\nos.chdir('demo')\n"
"import demo.demo as demo\n"
"demo.Main()\nos.chdir('..')")
f.close()
# Zip it up
Log("Create zip file")
os.chdir('dist')
zfile = zipfile.ZipFile('eclibdemo.zip', 'w',
compression=zipfile.ZIP_DEFLATED)
files = list()
for dpath, dname, fnames in os.walk('eclibdemo'):
files.extend([ os.path.join(dpath, fname).\
lstrip(os.path.sep)
for fname in fnames])
for fname in files:
zfile.write(fname.encode(sys.getfilesystemencoding()))
os.chdir('../')
Log("ECLIB Demo build is complete")
def CleanBuild():
"""Cleanup all build related files"""
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
for path in ('dist', 'build', 'tmp'):
if os.path.exists(path):
Log("Cleaning %s..." % path)
shutil.rmtree(path)
def Log(msg):
"""Write to the build log"""
# TODO add log file, just write to console for now
print(msg)
#----------------------------------------------------------------------------#
if __name__ == '__main__':
if __platform__ == "win32" and 'py2exe' in sys.argv:
BuildPy2Exe()
elif __platform__ == "darwin" and 'py2app' in sys.argv:
BuildOSXApp()
elif 'eclib' in sys.argv:
BuildECLibDemo()
elif 'clean' in sys.argv:
CleanBuild()
else:
DoSourcePackage()
|
163gal/Time-Line
|
libs/wx/tools/Editra/setup.py
|
Python
|
gpl-3.0
| 20,538
|
[
"VisIt"
] |
6d3278f310a8e0284a0b08c2082f024a428472813f173a74ede188c6a59d2e5f
|
#!/usr/bin/env python
#
# $File: caseControlSample.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
from simuPOP.sampling import drawCaseControlSamples
pop = sim.Population([10000], loci=5)
sim.initGenotype(pop, freq=[0.2, 0.8])
sim.maPenetrance(pop, loci=2, penetrance=[0.11, 0.15, 0.20])
# draw multiple case control sample
samples = drawCaseControlSamples(pop, cases=500, controls=500, numOfSamples=5)
for sample in samples:
sim.stat(sample, association=range(5))
print(', '.join(['%.6f' % sample.dvars().Allele_ChiSq_p[x] for x in range(5)]))
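# A single sample can be drawn the same way with the non-plural helper
# (assumed API in simuPOP.sampling):
#
# >>> from simuPOP.sampling import drawCaseControlSample
# >>> sample = drawCaseControlSample(pop, cases=500, controls=500)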
|
BoPeng/simuPOP
|
docs/caseControlSample.py
|
Python
|
gpl-2.0
| 1,570
|
[
"VisIt"
] |
ebbf593d25e51b823b1c33f25b48e27564433a5d9af14865dd815c13a1fd1a90
|
import numpy as np
from numpy.testing import assert_raises
from skimage.filters._gaussian import gaussian
from skimage._shared._warnings import expected_warnings
def test_negative_sigma():
a = np.zeros((3, 3))
a[1, 1] = 1.
assert_raises(ValueError, gaussian, a, sigma=-1.0)
assert_raises(ValueError, gaussian, a, sigma=[-1.0, 1.0])
assert_raises(ValueError, gaussian, a,
sigma=np.asarray([-1.0, 1.0]))
def test_null_sigma():
a = np.zeros((3, 3))
a[1, 1] = 1.
assert np.all(gaussian(a, 0) == a)
def test_default_sigma():
a = np.zeros((3, 3))
a[1, 1] = 1.
assert np.all(gaussian(a) == gaussian(a, sigma=1))
def test_energy_decrease():
a = np.zeros((3, 3))
a[1, 1] = 1.
gaussian_a = gaussian(a, sigma=1, mode='reflect')
assert gaussian_a.std() < a.std()
def test_multichannel():
a = np.zeros((5, 5, 3))
a[1, 1] = np.arange(1, 4)
gaussian_rgb_a = gaussian(a, sigma=1, mode='reflect',
multichannel=True)
# Check that the mean value is conserved in each channel
# (color channels are not mixed together)
assert np.allclose([a[..., i].mean() for i in range(3)],
[gaussian_rgb_a[..., i].mean() for i in range(3)])
# Test multichannel = None
with expected_warnings(['multichannel']):
gaussian_rgb_a = gaussian(a, sigma=1, mode='reflect')
# Check that the mean value is conserved in each channel
# (color channels are not mixed together)
assert np.allclose([a[..., i].mean() for i in range(3)],
[gaussian_rgb_a[..., i].mean() for i in range(3)])
# Iterable sigma
gaussian_rgb_a = gaussian(a, sigma=[1, 2], mode='reflect',
multichannel=True)
assert np.allclose([a[..., i].mean() for i in range(3)],
[gaussian_rgb_a[..., i].mean() for i in range(3)])
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
paalge/scikit-image
|
skimage/filters/tests/test_gaussian.py
|
Python
|
bsd-3-clause
| 2,010
|
[
"Gaussian"
] |
c227cf955ef5fa390497683d7d4926a4dc96c1e57a9f517beee995b85ccc0841
|
from __future__ import annotations
class GaussianRSProfileModelExt:
"""An extension class implementing a reciprocal space gaussian profile model."""
name = "gaussian_rs"
default = True
@staticmethod
def phil():
from dials.algorithms.profile_model.gaussian_rs import phil_scope
return phil_scope
@staticmethod
def algorithm():
from dials.algorithms.profile_model.gaussian_rs import Model
return Model
@classmethod
def from_dict(cls, d):
return cls.algorithm().from_dict(d)
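# Editor's usage sketch (hypothetical 'serialized' input; the dials phil_scope
# and Model APIs are assumed from the imports above):
#   scope = GaussianRSProfileModelExt.phil()
#   model = GaussianRSProfileModelExt.from_dict(serialized)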
|
dials/dials
|
extensions/gaussian_rs_profile_model_ext.py
|
Python
|
bsd-3-clause
| 557
|
[
"Gaussian"
] |
551b804c8738ff54292515de6fa1c00dd2ea5f1ee4cc6927242e464ccca1143d
|
# Low-level execution of AST commands using xdotool.
import os, platform
from spark import GenericASTTraversal
from automators import XDoAutomator, CLIClickAutomator, NirCmdAutomator
class ExecuteCommands(GenericASTTraversal):
def __init__(self, ast, real = True):
GenericASTTraversal.__init__(self, ast)
self.output = []
if 'Linux' in platform.system():
self.automator = XDoAutomator(real)
elif 'Darwin' in platform.system():
self.automator = CLIClickAutomator(real)
elif 'Windows' in platform.system():
self.automator = NirCmdAutomator(real)
else:
print "No suitable automator for platform", platform.system()
self.postorder_flat()
self.automator.flush()
# a version of postorder which does not visit children recursively
def postorder_flat(self, node=None):
if node is None:
node = self.ast
#for kid in node:
# self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def n_chain(self, node):
for n in node.children:
self.postorder_flat(n)
def n_char(self, node):
self.automator.key(node.meta[0])
def n_raw_char(self, node):
self.automator.raw_key(node.meta[0])
def n_mod_plus_key(self, node):
self.automator.mod_plus_key(node.meta, node.children[0].meta[0])
def n_movement(self, node):
self.automator.key_movement(node.meta[0].type)
def n_sequence(self, node):
for c in node.meta[0]:
self.automator.raw_key(c)
def n_word_sequence(self, node):
n = len(node.children)
for i in range(0, n):
word = node.children[i].meta
for c in word:
self.automator.raw_key(c)
 if i + 1 < n:
self.automator.raw_key('space')
def n_null(self, node):
pass
def n_repeat(self, node):
self.postorder_flat(node.children[0])
char_list = self.automator.char_list[-1]
for n in range(1, node.meta[0]):
self.automator.add_keystrokes(char_list)
def default(self, node):
pass
def execute(ast, real):
ExecuteCommands(ast, real)
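# Editor's note: ExecuteCommands dispatches by node type name ('n_' + typestring),
# so supporting a new AST node only requires a matching handler, e.g.:
#
# def n_pause(self, node):    # hypothetical node type
#     self.automator.flush()  # flush pending keystrokes, as in __init__ above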
|
dwks/silvius
|
grammar/execute.py
|
Python
|
bsd-2-clause
| 2,379
|
[
"VisIt"
] |
62976f5a72a62698cf9cf94d81beae6c6728993ea34b4b44b1bb77ffd10a776b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# cgishared - cgi helper function
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Common cgi functions"""
import os
from shared.base import requested_page
from shared.cgioutput import CGIOutput
from shared.conf import get_configuration_object
from shared.httpsclient import extract_client_id
def cgiscript_header(header_info=None, content_type='text/html'):
"""Output header used by CGI scripts before any output"""
# first header line
print 'Content-Type: %s' % content_type
if header_info:
 # header_info is '\n' separated
header_array = header_info.split('\n')
for header_line in header_array:
print header_line
# blank line, end of headers
print ''
def init_cgiscript_possibly_with_cert(print_header=True,
content_type='text/html'):
"""Prepare for CGI script with optional client certificate. Only used from
some of the cgi scripts still on the legacy-form like requestnewjob and
put. I.e. scripts where certs are not required due to use of sessionid.
"""
# Always rely on os.environ here since only called from cgi scripts
environ = os.environ
if print_header:
cgiscript_header(content_type=content_type)
configuration = get_configuration_object()
logger = configuration.logger
out = CGIOutput(logger)
# get DN of user currently logged in
client_id = extract_client_id(configuration, environ)
if not client_id:
logger.debug('(No client ID available in SSL session)')
logger.info('script: %s cert: %s' % (requested_page(), client_id))
return (logger, configuration, client_id, out)
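# Editor's usage sketch (hedged): a legacy CGI entry point would typically begin
#   (logger, configuration, client_id, out) = init_cgiscript_possibly_with_cert()
# and then branch on whether client_id identified a certificate-backed session.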
|
heromod/migrid
|
mig/shared/cgishared.py
|
Python
|
gpl-2.0
| 2,511
|
[
"Brian"
] |
0ab51e12f8b101b3101ed794e239f47f8a36697124d57d9ba843f768d85bbd98
|
#!/usr/bin/env python
# map transcription factor binding (peak calls) into unique binding regions
import sys
import time
import optparse
import general
import numpy
import metrn
import cetrn  # provides cetrn.chrm2code_dict used in 'map:annotation' below (module assumed to ship alongside metrn)
import modencode
import network
import bed
import os
import copy
import pdb
import re
import pickle
import random
from scipy.stats.stats import pearsonr
from quantile import Quantile
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define classes and functions of internal use """
def setorder(inlist):
outlist = list()
for item in inlist:
if not item in outlist:
outlist.append(item)
return outlist
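# e.g. setorder([3, 1, 3, 2, 1]) -> [3, 1, 2]: order-preserving de-duplication.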
""" define a function to count peaks in a gffkde output file """
def gffkdePeakCounter(indata, mode="file"):
if mode == "file":
processed, inlines = list(), open(indata).readlines()
elif mode == "list":
processed, inlines = list(), indata
for inline in inlines:
for peakData in inline.strip().split("\t")[7].rstrip(";").split(";")[1:]:
processed.append(peakData.split(",")[0])
return len(list(set(processed)))
""" define a function to scan regions for peaks from the same dataset """
def gffkdeDuplicateScanner(indata, mode="file"):
if mode == "file":
processed, inlines = list(), open(indata).readlines()
elif mode == "list":
processed, inlines = list(), indata
r, k = 0, 0
for inline in inlines:
datasets = list()
for peakData in inline.strip().split("\t")[7].rstrip(";").split(";")[1:]:
processed.append(peakData.split(",")[0])
datasets.append(peakData.split(",")[0].split("_peaks_")[0])
if (len(datasets) - len(set(datasets))) > 0:
duplicates = list()
for dataset in sorted(list(set(datasets))):
if datasets.count(dataset) > 1:
duplicates.append(dataset)
print len(duplicates), len(datasets) - len(set(datasets)), ", ".join(duplicates)
k += 1
r += 1
return len(list(set(processed))), r, k, round(float(k)/r, 2)
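# gffkdeDuplicateScanner returns (unique peak count, regions scanned,
# regions containing duplicate-dataset peaks, duplicate fraction).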
""" define a function to annotate peaks based on the density files """
def annotatePeaks(infile, peak_dict, maphot=True, export_file=False):
# open output merged file:
if export_file:
m_output = open(export_file, "wb")
print >>m_output, "\t".join(["chrm", "start", "end", "feature", "score", "strand", "dataset.count", "factor.count", "stage.count", "signal.avg", "signal.med", "signal.sum", "signal.min", "signal.max", "collapsed.strains", "collapsed.factors", "collapsed.stages", "collapsed.institutes", "collapsed.methods", "collapsed.info"])
# gather peaks from merged file:
bind_dict, region_dict, index, peaks = dict(), dict(), 1, list()
inlines = open(infile).readlines()
if maphot:
inlines.pop(0)
for inline in inlines:
if maphot:
#V InferredHS300bw HOTspot 58437 58743 1.00084493975139 . . 1,1;OP327_F23F12.9_EM_yale_stn_sample_optimal_narrowPeak_MACS_peak_9720,58593,0.999950001249979;
chrm, start, end, feature, hits, strand, density, collapsed_details = inline.strip("\n").split("\t")
signals, contributions, info = list(), list(), list()
datasets, strains, factors, stages, institutes, methods = list(), list(), list(), list(), list(), list()
collapsed_details = collapsed_details.split(";")
collapsed_details.pop(0)
for collapsed_detail in collapsed_details:
if not collapsed_detail == "":
collapsed_peak, midpoint, contribution = collapsed_detail.split(",")
dataset, peak = collapsed_peak.split("_peaks_")
strain, factor, stage, institute, method = dataset.split("_")[:5]
pchrm, pstart, pend, pscore, pstrand, psignal, ppvalue, pqvalue, ppoint = peak_dict[strain][factor][stage][institute][method][peak]
signals.append(float(psignal))
contributions.append(str(contribution))
datasets.append(dataset)
strains.append(strain)
factors.append(factor)
stages.append(stage)
institutes.append(institute)
methods.append(method)
info.append(",".join(map(str, [dataset,peak,pstart,pend,psignal,contribution])))
if not strain in bind_dict:
bind_dict[strain] = dict()
if not factor in bind_dict[strain]:
bind_dict[strain][factor] = dict()
if not stage in bind_dict[strain][factor]:
bind_dict[strain][factor][stage] = dict()
if not institute in bind_dict[strain][factor][stage]:
bind_dict[strain][factor][stage][institute] = dict()
if not method in bind_dict[strain][factor][stage][institute]:
bind_dict[strain][factor][stage][institute][method] = dict()
bind_dict[strain][factor][stage][institute][method][peak] = [feature, chrm, start, end, contribution, hits, density, pstrand, psignal, ppvalue, pqvalue, ppoint]
# store tfbs (region) data:
number = len(signals)
values = [numpy.mean(signals), numpy.median(signals), sum(signals), min(signals), max(signals)]
datasets = setorder(datasets)
strains = setorder(strains)
factors = setorder(factors)
stages = setorder(stages)
institutes = setorder(institutes)
methods = setorder(methods)
dataset_count = len(datasets)
factor_count = len(factors)
stage_count = len(stages)
collapsed_strains = ";".join(strains)
collapsed_factors = ";".join(factors)
collapsed_stages = ";".join(stages)
collapsed_institutes = ";".join(institutes)
collapsed_methods = ";".join(methods)
collapsed_info = ";".join(info)
#hits = int(hits)
#if hits != number or number != dataset_count:
# print feature, hits, float(density), number, dataset_count, factor_count, stage_count
# pdb.set_trace()
region_dict[index] = map(str, [chrm, start, end, feature, number, strand, dataset_count, factor_count, stage_count] + values + [collapsed_strains, collapsed_factors, collapsed_stages, collapsed_institutes, collapsed_methods, collapsed_info])
index += 1
# return the peak (bind) and region (tfbs) dictionaries, and the peak count!
return bind_dict, region_dict, gffkdePeakCounter(inlines, mode="list")
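# annotatePeaks returns (bind_dict: per strain/factor/stage/institute/method peak
# records, region_dict: per-region summary rows, unique collapsed-peak count).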
""" define a function to compute number of regions and coverage for sets of peaks """
def computeCoverage(peakfiles, peakspath, regionspath, outfile):
# define temporary files:
completefile = regionspath + "computeCoverage_complete.bed"
sortingsfile = regionspath + "computeCoverage_sortings.bed"
collapsefile = regionspath + "computeCoverage_collapse.bed"
# collect peaks into single file:
command = "cat"
for peakfile in peakfiles:
command = command + " " + peakspath + peakfile
command = command + " > " + completefile
os.system(command)
# sort complete file:
command = "sortBed -i " + completefile + " > " + sortingsfile
os.system(command)
# generate collapsed file:
command = "mergeBed -i " + sortingsfile + " -nms > " + collapsefile
os.system(command)
# recover number of regions and coverage:
regions, bases = 0, 0
for inline in open(collapsefile).readlines():
chrm, start, stop, region = inline.strip().split("\t")
bases += int(stop) - int(start) + 1
regions += 1
# export counts:
f_output = open(outfile, "w")
print >>f_output, "\t".join(["regions", "bases"])
print >>f_output, "\t".join(map(str, [regions, bases]))
f_output.close()
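# Editor's note (hedged): computeCoverage implements the classic BEDTools
# concatenate -> sortBed -> mergeBed idiom; the -nms flag (collapse feature
# names into a single column) belongs to the older, pre-2.20 mergeBed
# interface assumed throughout this script.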
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action = "store", type = "string", dest = "path", help = "Path from script to files")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Target organism for operations...", default="OFF")
parser.add_option("--mode", action = "store", type = "string", dest = "mode", help = "Operations to be executed: build, map")
parser.add_option("--peaks", action = "store", type = "string", dest = "peaks", help = "Basename for target peaks", default="OFF")
parser.add_option("--source", action = "store", type = "string", dest = "source", help = "Which peaks should be used as source for 'build' and 'expand' modes?", default=False)
parser.add_option("--infile", action = "store", type = "string", dest = "infile", help = "input BED file of feature coordinates", default=False)
parser.add_option("--name", action = "store", type = "string", dest = "name", help = "Output name for mapping, input name for map", default="OFF")
parser.add_option("--header", action = "store", type = "string", dest = "header", help = "Does the annotation file have a header?", default="ON")
parser.add_option("--target", action = "store", type = "string", dest = "target", help = "Feature column:type to target", default="feature")
parser.add_option("--fraction", action = "store", type = "string", dest = "fraction", help = "Fractional overlap required", default="0.1")
parser.add_option("--queries", action = "store", type = "string", dest = "queries", help = "Which types of features should be registered", default="standard")
parser.add_option("--ids", action = "store", type = "string", dest = "ids", help = "How should feature IDs be keyed in?", default="standard")
parser.add_option("--others", action = "store", type = "string", dest = "others", help = "Should 'other' category be created?", default="OFF")
parser.add_option("--elsewhere", action = "store", type = "string", dest = "elsewhere", help = "Should features that do not overlap categories be counted?", default="OFF")
parser.add_option("--index", action = "store", type = "str", dest = "index", help = "Column indexes for feature and classification", default="OFF")
parser.add_option("--policy", action = "store", type = "str", dest = "policy", help = "Class assignment policy: 'score', 'sum' or 'max'", default="sum")
parser.add_option("--group", action = "store", type = "str", dest = "group", help = "Groups semi-colon-separated (;), members are comma-separated", default="OFF")
parser.add_option("--start", action = "store", type = "int", dest = "start", help = "Start index for range", default=1)
parser.add_option("--stop", action = "store", type = "int", dest = "stop", help = "End index for range", default=40)
parser.add_option("--headerDict", action = "store", type = "string", dest = "headerDict", help = "Header dictionary...", default="bed")
parser.add_option("--label", action = "store", type = "string", dest = "label", help = "Dataset labeling mode", default="factor.context")
parser.add_option("--rename", action = "store", type = "string", dest = "rename", help = "Targets to rename. Comma-separated list of 'target:replacement' pairs to search and replace.", default="OFF")
parser.add_option("--reference", action = "store", type = "string", dest = "reference", help = "Query to which ratios should be scaled...", default="OFF")
parser.add_option("--order", action = "store", type = "string", dest = "order", help = "How should the inputs be ordered?", default="OFF")
parser.add_option("--prioritize", action = "store", type = "string", dest = "prioritize", help = "Should we prioritize counts? That is, assign overlaps preferentially to classes.", default="OFF")
parser.add_option("--complexity", action = "store", type = "string", dest = "complexity", help = "Max complexity (factor.count) allowed", default=False)
parser.add_option("--method", action = "store", type = "string", dest = "method", help = "Filter for a specific method?", default=False)
parser.add_option("--exclude", action = "store", type = "string", dest = "exclude", help = "Comma-separated list of factors to exclude", default="")
parser.add_option("--window", action = "store", type = "string", dest = "window", help = "Window surrounding feature for promoter and downstream analysis", default="0")
parser.add_option("--report", action = "store", type = "string", dest = "report", help = "Export report/assignments per region?", default="OFF")
parser.add_option("--max", action = "store", type = "string", dest = "max", help = "Maximum number of peaks allowed per experiment", default=False)
parser.add_option("--min", action = "store", type = "string", dest = "min", help = "Minimum number of peaks allowed per experiment", default=False)
parser.add_option("--filter", action = "store", type = "string", dest = "filter", help = "Filter peaks based on density or overlap?", default="OFF")
parser.add_option("--quality", action = "store", type = "string", dest = "quality", help = "Filter peaks based on quality?", default="OFF")
parser.add_option("--nuclear", action = "store", type = "string", dest = "nuclear", help = "Peaks are only nuclear?", default="ON")
parser.add_option("--parameters", action = "store", type = "string", dest = "parameters", help = "Variable parameters...", default="")
parser.add_option("--threads", action = "store", type = "int", dest = "threads", help = "Parallel processing threads", default=1)
parser.add_option("--chunks", action = "store", type = "int", dest = "chunks", help = "", default=100)
parser.add_option("--module", action = "store", type = "string", dest = "module", help = "", default="md1")
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "Qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "Job name for cluster", default="OFF")
parser.add_option("--copy", action = "store", type = "string", dest = "copy", help = "Copy simulated peaks to analysis folder?", default="OFF")
parser.add_option("--tag", action = "store", type = "string", dest = "tag", help = "Add tag to TFBS?", default="")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
qsubpath = path_dict["qsub"]
bindingpath = path_dict["binding"]
neuronspath = path_dict["neurons"]
cellspath = path_dict["cells"]
# standardize paths for analysis:
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
#organismIGV = "ce6"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
#organismIGV = "ce6"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
#organismIGV = "ce6"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
#organismIGV = "dm5"
# specify genome size file:
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
# load gene ID dictionaries:
#id2name_dict, name2id_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], "Sequence Name (Gene)", "Gene Public Name", mode="label", header=True, idUpper=True, nameUpper=True)
# set some of the basic parameters:
exclude = option.exclude.split(",")
window = int(option.window)
if option.max:
max_peaks = int(option.max)
else:
max_peaks = "X"
# define output name:
outkey = "mapbinding_" + option.peaks
# define relevant paths:
bindingpath = bindingpath + option.peaks + "/" + option.name + "/"
mappingpath = bindingpath + "mapping/"
datasetpath = bindingpath + "dataset/"
overlappath = bindingpath + "overlap/"
picklespath = bindingpath + "pickles/"
reportspath = bindingpath + "reports/"
summarypath = bindingpath + "summary/"
compilepath = bindingpath + "compile/"
densitypath = bindingpath + "density/"
saturationpath = bindingpath + "saturation/"
fractionalpath = bindingpath + "fractional/"
general.pathGenerator(mappingpath)
general.pathGenerator(datasetpath)
general.pathGenerator(overlappath)
general.pathGenerator(picklespath)
general.pathGenerator(reportspath)
general.pathGenerator(summarypath)
general.pathGenerator(compilepath)
general.pathGenerator(densitypath)
general.pathGenerator(saturationpath)
general.pathGenerator(fractionalpath)
# Note: These are the retired queries:
# queries = ['CDS', 'protein_coding_primary_transcript', 'ncRNA_primary_transcript', 'snoRNA', 'Pseudogene', 'tRNA', 'miRNA_primary_transcript', 'snRNA', 'rRNA_primary_transcript', 'intron', 'exon', 'TSS', 'SL1_acceptor_site', 'SL2_acceptor_site', 'transcription_end_site', 'polyA_site', 'polyA_signal_sequence', 'five_prime_UTR', 'three_prime_UTR', 'snlRNA', 'DNAse_I_hypersensitivity']
# map overlap between peaks and genomic features:
if option.mode == "map:overlap":
# define input and output files:
c_infile = peakspath + "mappeaks_" + option.peaks + "_complete.bed"
c_outfile = overlappath + outkey + "_complete_overlap_"
c_tmpfile = overlappath + outkey + "_complete.tmp"
m_infile = peakspath + "mappeaks_" + option.peaks + "_compiled.bed"
m_outfile = overlappath + outkey + "_compiled_overlap_"
m_tmpfile = overlappath + outkey + "_compiled.tmp"
i_infile = annotationspath + option.infile
i_tmpfile = overlappath + option.infile + ".tmp"
i_colfile = overlappath + option.infile + "_header.tmp"
f_outfile = overlappath + outkey + "_compiled"
s_outfile = summarypath + outkey + "_compiled"
# define feature key setup:
idComplex = ["chrm", "start", "end", "feature"]
# define header presence
if option.header == "ON":
headerFlag = True
else:
headerFlag = False
# define annotation headers:
if option.headerDict == "auto":
annotationHeader = general.build_header_dict(i_infile)
elif option.headerDict == "bed":
annotationHeader = metrn.bedHeader
else:
annotationHeader = dict()
for entry in option.headerDict.split(","):
key, value = entry.split(":")
annotationHeader[key] = int(value)
# load annotations:
print
print "Loading input annotations..."
annotationDict = general.build2(i_infile, id_complex=idComplex, header_dict=annotationHeader, header=headerFlag, id_include=True, separator=":")
#x = annotationDict.keys()[0]
#y = annotationDict[x]
#overlapBed = annotationDict
#query = "7_Egn4"
#queryColumn = "feature"
#queryBed = bed.valueFilter(overlapBed, filterDict={queryColumn:query}, modeDict={queryColumn:"match"}, structure="tree")
#print queryBed.keys()
#pdb.set_trace()
# define standard targets/queries:
if option.queries == "standard":
queries = ['tss', 'five_prime_utr', 'three_prime_utr', 'exon', 'intron', 'protein_coding_primary_transcript']
elif option.queries == "auto":
queries = list()
for feature in annotationDict:
query = annotationDict[feature][option.target]
if not query in option.exclude.split(","):
queries.append(query)
queries = sorted(list(set(queries)))
elif option.queries != "OFF":
queries = list()
for query in option.queries.split(","):
if not query in option.exclude.split(","):
queries.append(query)
print "Preparing temporary files..."
command = 'cp ' + c_infile + ' ' + c_tmpfile
os.system(command)
command = 'cp ' + m_infile + ' ' + m_tmpfile
os.system(command)
if option.header == "ON":
command = 'grep -v "feature" ' + i_infile + ' > ' + i_tmpfile
os.system(command)
command = "head -n 1 " + i_infile + ' > ' + i_colfile
os.system(command)
else:
command = "cp " + i_infile + " " + i_tmpfile
os.system(command)
print "Intersecting annotation features with peaks..."
command = "intersectBed -u -f 0.1 -a " + i_tmpfile + " -b " + c_tmpfile + " > " + c_outfile
os.system(command)
print "Intersecting annotation features with regions..."
command = "intersectBed -u -f 0.1 -a " + i_tmpfile + " -b " + m_tmpfile + " > " + m_outfile
os.system(command)
print "Gathering compiled peak regions..."
mergedBed = general.build2(m_infile, id_column="feature", header_dict=metrn.regionHeader, header=False, id_include=True)
print
overlapDict = dict()
technique_dict = {
"ox":["equal","exactly"],
"og":["greater.equal","at least"],
"ol":["lesser.equal","at most"],
"ob":["greater","more than"]
}
for technique in ["ox","og","ol","ob"]:
overlapDict[technique] = dict()
for factorCount in range(option.start, option.stop + 1):
modeCall, modeText = technique_dict[technique]
fm_outfile = f_outfile.replace("_compiled", "_compiled_" + technique + str(factorCount).zfill(2)) + ".bed"
om_outfile = fm_outfile.replace(".bed","") + "_overlap"
filteredBed = bed.valueFilter(mergedBed, filterDict={"occupancy":factorCount}, modeDict={"occupancy":modeCall}, structure="tree")
bed.export(filteredBed, fm_outfile, headerMode="explicit", headerDict=metrn.regionHeader, structure="tree")
print "...with", modeText, factorCount, "factor(s) bound:", len(filteredBed)
command = "intersectBed -u -a " + i_tmpfile + " -b " + fm_outfile + " > " + om_outfile
os.system(command)
queryCount = 0
overlapDict[technique][factorCount] = dict()
#overlapBed = general.build2(om_outfile, id_column=idColumn, header_dict=annotationHeader, header=True, id_complex=idComplex, id_include=True, id_index=True, separator=":")
overlapBed = general.build2(om_outfile, id_complex=idComplex, header_dict=annotationHeader, header=headerFlag, id_include=True, separator=":")
for query in queries:
#queryBed = bed.valueFilter(overlapBed, filterDict={queryColumn:query.lower()}, modeDict={queryColumn:"match"}, structure="tree")
queryBed = bed.valueFilter(overlapBed, filterDict={option.target:query}, modeDict={option.target:"match"}, structure="tree")
overlapDict[technique][factorCount][query] = len(queryBed)
queryCount += len(queryBed)
if option.others == "ON":
overlapDict[technique][factorCount]["others"] = len(overlapBed) - queryCount
print
print
print "Removing temporary files..."
command = "rm -rf " + bindingpath + "*.tmp"
os.system(command)
command = "rm -rf " + overlappath + "*.tmp"
os.system(command)
command = "rm -rf " + annotationspath + "*.tmp"
os.system(command)
print
# update queries to include "others" if necessary:
if option.others == "ON":
queries.append("others")
# export per factor-count feature-type summary:
for technique in overlapDict:
sv_sumfile = s_outfile.replace("_compiled", "_compiled_" + technique + "XX") + "_overlap_summary_values"
sn_sumfile = s_outfile.replace("_compiled", "_compiled_" + technique + "XX") + "_overlap_summary_normal"
sv_output = open(sv_sumfile, "w")
sn_output = open(sn_sumfile, "w")
print >>sv_output, "\t".join(queries)
print >>sn_output, "\t".join(queries)
for factorCount in sorted(overlapDict[technique].keys()):
values, normalized = list(), list()
for query in queries:
values.append(overlapDict[technique][factorCount][query])
for value in values:
if sum(values) > 0:
normalized.append(float(value)/sum(values))
else:
normalized.append(0)
print >>sv_output, "\t".join(map(str, values))
print >>sn_output, "\t".join(map(str, normalized))
sv_output.close()
sn_output.close()
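 # Editor's sketch of the summary files written above (tab-separated): the
 # "values" file holds raw feature counts, one row per factor count; the
 # "normal" file holds the same rows as fractions, e.g. for queries tss/exon/intron:
 #   tss     exon    intron
 #   120     45      33
 #   0.606   0.227   0.167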
# map overlap between peaks/regions and genomic features:
if option.mode == "map:dataset" or option.mode == "map:regions":
# define source path:
if option.mode == "map:dataset":
sourcepath = peakspath + option.peaks + "/"
elif option.mode == "map:regions":
sourcepath = path_dict["binding"] + option.peaks + "/input/"
# define input and output files:
i_infile = annotationspath + option.infile
i_tmpfile = datasetpath + option.infile + ".tmp"
i_colfile = datasetpath + option.infile + "_header.tmp"
f_outfile = datasetpath + outkey + "_compiled"
s_outfile = summarypath + outkey + "_compiled"
r_outfile = reportspath + outkey + "_report"
# will we be exporting reports?
#if option.report == "ON":
# r_output = open(r_outfile, "w")
# define header presence
if option.header == "ON":
headerFlag = True
else:
headerFlag = False
# define annotation headers:
if option.headerDict == "auto":
annotationHeader = general.build_header_dict(i_infile)
elif option.headerDict == "bed":
annotationHeader = metrn.bedHeader
else:
annotationHeader = dict()
for entry in option.headerDict.split(","):
key, value = entry.split(":")
annotationHeader[key] = int(value)
# define feature key setup:
if option.ids == "standard":
idComplex = ["chrm", "start", "end", "feature"]
idOverlap = 3
elif option.ids == "feature":
idComplex = ["feature"]
idOverlap = 3
# load annotations:
print
print "Loading input annotations..."
annotationDict = general.build2(i_infile, i=option.target, j=idComplex, x="", mode="matrix", header_dict=annotationHeader, header=headerFlag, separator=":", counter=True)
#k = annotationDict.keys()[0]
#print k
#print annotationDict[k]
#pdb.set_trace()
# define standard targets/queries:
if option.queries == "standard":
queries = ['tss', 'five_prime_utr', 'three_prime_utr', 'exon', 'intron', 'protein_coding_primary_transcript']
elif option.queries == "auto":
queries = list()
for query in sorted(annotationDict.keys()):
if not query in option.exclude.split(","):
queries.append(query)
queries = sorted(list(set(queries)))
elif option.queries != "OFF":
queries = list()
for query in option.queries.split(","):
if not query in option.exclude.split(","):
queries.append(query)
if option.header == "ON":
command = 'grep -v "feature" ' + i_infile + ' > ' + i_tmpfile
os.system(command)
command = "head -n 1 " + i_infile + ' > ' + i_colfile
os.system(command)
else:
command = "cp " + i_infile + " " + i_tmpfile
os.system(command)
print "Intersecting annotation features with peaks..."
print
overlapDict = dict()
for dataset in os.listdir(sourcepath):
# generate dataset label:
if option.mode == "map:dataset":
datasetID = metrn.labelGenerator(option.label, dataset=dataset, mode="label")
elif option.mode == "map:regions":
datasetID = dataset.replace(".bed", "")
# rename elements if necessary:
if option.rename != "OFF":
for scheme in option.rename.split(","):
target, replace = scheme.split(":")
datasetID = datasetID.replace(target, replace)
# store dataset in dictionary:
overlapDict[datasetID] = dict()
# define peak source and output
p_infile = sourcepath + dataset
p_outfile = datasetpath + dataset.replace(".bed", "_intersect.bed")
# adjust annotationHeader dictionary:
adjustHeader = dict()
adjustFactor = len(open(p_infile).readline().split("\t"))
for key in annotationHeader:
adjustHeader[key] = annotationHeader[key] + adjustFactor
print "Processing:", datasetID
if option.fraction == "OFF":
command = "intersectBed -wo -a " + p_infile + " -b " + i_tmpfile + " > " + p_outfile
os.system(command)
else:
command = "intersectBed -wo -f " + str(option.fraction) + " -a " + p_infile + " -b " + i_tmpfile + " > " + p_outfile
os.system(command)
#print command
# gather annotation overlap peak regions
overlapBed = general.build2(p_outfile, i=option.target, j=idOverlap, x="", mode="matrix", header_dict=adjustHeader, header=headerFlag, separator=":", counter=True)
#os.system("head -n 5 " + i_tmpfile)
#print
#os.system("head -n 5 " + p_infile)
#print
#os.system("head -n 5 " + p_outfile)
#print
#x = overlapBed.keys()[0]
#print overlapBed.keys()
#print overlapBed[x]
#pdb.set_trace()
# count everything ...
if option.prioritize == "OFF":
queryCount = 0
for query in queries:
if query in overlapBed:
overlapDict[datasetID][query] = sum(overlapBed[query].values())
else:
overlapDict[datasetID][query] = 0
queryCount += overlapDict[datasetID][query]
if option.others == "ON":
totalCount = 0
for entry in overlapBed:
totalCount += sum(overlapBed[entry].values())
overlapDict[datasetID]["others"] = totalCount - queryCount
# ...or preferentially assign counts to classes?
else:
if option.prioritize == "ON":
prioritize = sorted(overlapBed.keys())
else:
prioritize = option.prioritize.split(",")
queryCount = 0
invertedBed = general.dictinvert(overlapBed, mode="matrix")
for feature in invertedBed:
for query in prioritize:
if not query in overlapDict[datasetID]:
overlapDict[datasetID][query] = 0
if query in invertedBed[feature]:
overlapDict[datasetID][query] += 1
break
for query in prioritize:
if query in overlapDict[datasetID]:
queryCount += overlapDict[datasetID][query]
if option.others == "ON":
totalCount = len(invertedBed)
overlapDict[datasetID]["others"] = totalCount - queryCount
# is it necessary to determine feature counts?
if option.elsewhere == "ON":
featureCount = general.countLines(p_infile)
overlapDict[datasetID]["others"] = featureCount - queryCount
#if True:
# featureCount = general.countLines(p_infile)
# invertedBed = general.dictinvert(overlapBed, mode="matrix")
# feature = invertedBed.keys()[0]
# print featureCount, len(invertedBed), queryCount
# print feature
# print invertedBed[feature]
# pdb.set_trace()
# update queries to include "others" if necessary:
if option.others == "ON":
queries.append("others")
# check completeness (and fill missing):
for datasetID in sorted(overlapDict.keys()):
for query in queries:
if not query in overlapDict[datasetID]:
overlapDict[datasetID][query] = 0
# score datasets with highest promoter ratio:
if option.reference != "OFF":
referenceDict = dict()
for datasetID in sorted(overlapDict.keys()):
values = list()
for query in queries:
values.append(overlapDict[datasetID][query])
if query == option.reference:
reference = overlapDict[datasetID][query]
if sum(values) != 0:
referenceDict[datasetID] = float(reference)/sum(values)
else:
referenceDict[datasetID] = 0
datasetIDs = general.valuesort(referenceDict)
datasetIDs.reverse()
elif option.order != "OFF":
datasetIDs = option.order.split(",")
else:
datasetIDs = sorted(overlapDict.keys())
# export per factor-count feature-type summary:
print
print "Exporting datasets as ranked by overlap with reference:"
print
sv_sumfile = s_outfile.replace("_compiled", "_compiled_dataset_summary_values")
sn_sumfile = s_outfile.replace("_compiled", "_compiled_dataset_summary_normal")
sv_output = open(sv_sumfile, "w")
sn_output = open(sn_sumfile, "w")
print >>sv_output, "\t".join(["dataset"] + queries)
print >>sn_output, "\t".join(["dataset"] + queries)
rank = 1
for datasetID in datasetIDs:
print "#" + str(rank), ":", datasetID
values, normalized = list(), list()
for query in queries:
if query in overlapDict[datasetID]:
values.append(overlapDict[datasetID][query])
else:
values.append(0)
for value in values:
if sum(values) > 0:
normalized.append(float(value)/sum(values))
else:
normalized.append(0)
print >>sv_output, "\t".join(map(str, [datasetID] + values))
print >>sn_output, "\t".join(map(str, [datasetID] + normalized))
rank += 1
sv_output.close()
sn_output.close()
print
print "Removing temporary files..."
command = "rm -rf " + bindingpath + "*.tmp"
os.system(command)
command = "rm -rf " + datasetpath + "*.tmp"
os.system(command)
command = "rm -rf " + annotationspath + "*.tmp"
os.system(command)
print
# close report if necessary:
#if option.report == "ON":
# r_output.close()
# map correlations between promoter/enhancer distances and number of peaks:
elif option.mode == "map:peaks":
# load peak counts:
peakDict = general.build2(peakspath + "mappeaks_" + option.peaks + "_report.txt", id_column="dataset", mode="table")
# load promoters/enhancer distances:
classDict = general.build2(bindingpath + "summary/mapbinding_" + option.peaks + "_compiled_dataset_summary_normal", id_column="dataset", mode="table")
# scan correlation:
inlines = open(bindingpath + "summary/mapbinding_" + option.peaks + "_compiled_dataset_summary_normal").readlines()
inlines.pop(0)
factors, peaks, fractions = list(), list(), list()
for inline in inlines:
query = inline.strip().split("\t")[0]
for dataset in peakDict:
organism, strain, factor, context, institute, method = metrn.labelComponents(dataset)
if query == factor:
#print factor, int(peakDict[dataset]["peaks"]), float(classDict[factor]["0:500"])
factors.append(factor)
peaks.append(int(peakDict[dataset]["peaks"]))
fractions.append(float(classDict[factor]["0:500"]))
print
print "Correlation:", numpy.corrcoef(peaks, fractions)[0][1]
print
# map peak calls to genomic features:
elif option.mode == "map:annotation":
# get feature coordinates and names:
print
print "loading annotations..."
feature_dict = bed.build2(annotationspath + option.infile, mode="tree", chrmTree=True, featureMode="feature", header="auto", headerDict="auto", featureExtension=False, filtering=True, filterDict={"feature.type":"cds"}, modeDict={"feature.type":"match"}, testPrint=False)
# define output:
f_output = open(mappingpath + outkey + "_w" + str(option.window) + "_m" + str(max_peaks) + "_" + option.infile.replace(".bed","").replace(".txt","") + "_" + option.target,"w")
print >>f_output, "\t".join(["chrm", "start", "end", "feature", "score", "strand", "strain", "factor", "stage", "institute", "method", "peak", "up", "in", "dn", "tfbs.mark", "tfbs.a", "tfbs.b", "tfbs.c", "tfbs.d"])
# load peak dictionary:
print "loading peaks..."
complete_data = open(picklespath + outkey + "_complete.pickle")
complete_dict = pickle.load(complete_data)
# scan peak call hits to genomic features:
check = False
print "mapping peaks to features..."
print
network_dict, p = dict(), 0
for strain in complete_dict:
for factor in complete_dict[strain]:
for stage in complete_dict[strain][factor]:
if not stage in network_dict:
network_dict[stage] = dict()
for institute in complete_dict[strain][factor][stage]:
for method in complete_dict[strain][factor][stage][institute]:
if not method in network_dict[stage]:
network_dict[stage][method] = dict()
p += 1
print p, "_".join([strain,factor,stage,institute,method]), "(" + str(len(complete_dict[strain][factor][stage][institute][method])) + " peaks)"
for peak in complete_dict[strain][factor][stage][institute][method]:
feature, chrm, pstart, pend, contribution, hits, density, strand, signal, pvalue, qvalue, point = complete_dict[strain][factor][stage][institute][method][peak]
#if check:
# print [chrm, pstart, pend, pscore, strand, signal, pvalue, qvalue, point]
# pdb.set_trace()
if chrm in cetrn.chrm2code_dict:
chrm = cetrn.chrm2code_dict[chrm]
for feature in feature_dict[chrm]:
fstart = int(feature_dict[chrm][feature]["start"])
fend = int(feature_dict[chrm][feature]["end"])
fstrand = feature_dict[chrm][feature]["strand"]
if (pstart >= fstart - window and pstart <= fend + window) or (pend >= fstart - window and pend <= fend + window) or (pstart <= fstart - window and pend >= fend + window):
upstream, inside, dnstream = "-","-","-"
if fstrand == "+":
upL = fstart - window
upR = fstart
dnL = fend
dnR = fend + window
else:
upL = fend
upR = fend + window
dnL = fstart - window
dnR = fstart
inL, inR = sorted([fstart, fend])
if (pstart >= upL and pstart <= upR) or (pend >= upL and pend <= upR) or (pstart <= upL and pend >= upR):
upstream = "+"
if (pstart >= inL and pstart <= inR) or (pend >= inL and pend <= inR) or (pstart <= inL and pend >= inR):
inside = "+"
if (pstart >= dnL and pstart <= dnR) or (pend >= dnL and pend <= dnR) or (pstart <= dnL and pend >= dnR):
dnstream = "+"
#if feature == "T07D1.2" and peak == "RepAll_peak_16299":
# check = True
print >>f_output, "\t".join(map(str, [chrm, pstart, pend, feature, signal, "+", strain, factor, stage, institute, method, peak, upstream, inside, dnstream, strand, signal, pvalue, qvalue, point]))
# close output file:
f_output.close()
# compile binding into matrix, recording for each feature the classifications across multiple comparisons:
if option.mode == "map:compile":
# define output file:
m_outfile = compilepath + outkey + "_matrix"
m_output = open(m_outfile, "w")
# define header presence
headerFlag = True
# define column indexes:
if option.index == "regions":
featureIndex, classIndex, overlapIndex = 3, 14, 20
elif option.index != "OFF":
featureIndex, classIndex, overlapIndex = map(int, option.index.split(","))
"""
# define standard targets/queries:
if option.queries == "standard":
queries = ['tss', 'five_prime_utr', 'three_prime_utr', 'exon', 'intron', 'protein_coding_primary_transcript']
elif option.queries == "auto":
queries = list()
for query in sorted(annotationDict.keys()):
if not query in option.exclude.split(","):
queries.append(query)
queries = sorted(list(set(queries)))
elif option.queries != "OFF":
queries = list()
for query in option.queries.split(","):
if not query in option.exclude.split(","):
queries.append(query)
"""
# let's start to load overlaps observed in the sources:
print
print "Collecting intersections for features/regions..."
overlapDict, featureDict = dict(), dict()
for source in option.source.split(","):
print "Loading:", source
# define source:
sourcepath = path_dict["binding"] + option.peaks + "/" + source + "/dataset/"
# store source:
if not source in overlapDict:
overlapDict[source] = dict()
featureDict[source] = dict()
# load overlaps in this analysis:
for dataset in os.listdir(sourcepath):
indata = open(sourcepath + dataset)
inline = indata.readline()
while inline:
initems = inline.strip().split("\t")
feature, fstart, fend, classification, cstart, cend, overlap = initems[featureIndex], int(initems[featureIndex-2]), int(initems[featureIndex-1]), initems[classIndex], int(initems[classIndex-2]), int(initems[classIndex-1]), int(initems[overlapIndex])
# store classification info:
if not classification in option.exclude.split(","):
if not feature in overlapDict[source]:
overlapDict[source][feature] = dict()
featureDict[source][feature] = [fstart, fend, fend-fstart + 1]
if not classification in overlapDict[source][feature]:
overlapDict[source][feature][classification] = list()
overlapDict[source][feature][classification].append(overlap)
# reload...
inline = indata.readline()
#print feature, fstart, fend, classification, cstart, cend, overlap
#pdb.set_trace()
# define scoring groups:
if option.group != "OFF":
groupDict = dict()
orderDict = dict()
o = 1
for group in option.group.split("-"):
key, members = group.split(":")
for member in members.split(","):
groupDict[member] = key
orderDict[o] = key
o += 1
print groupDict
# alright, now let's do assignments (and store the features):
print
print "Assigning classes or scores to features..."
matrix = dict()
for source in overlapDict:
for feature in overlapDict[source]:
# assignment policies...
if option.policy in ["sum", "max"]:
for classification in overlapDict[source][feature]:
if option.policy == "sum":
overlapDict[source][feature][classification] = sum(overlapDict[source][feature][classification])
if option.policy == "max":
overlapDict[source][feature][classification] = max(overlapDict[source][feature][classification])
classes = general.valuesort(overlapDict[source][feature])
classes.reverse()
if not feature in matrix:
matrix[feature] = dict()
matrix[feature][source] = classes[0]
# scoring policies...
elif option.policy == "score":
scoreDict = dict()
for classification in overlapDict[source][feature]:
overlapDict[source][feature][classification] = sum(overlapDict[source][feature][classification])
if classification in groupDict:
group = groupDict[classification]
if not group in scoreDict:
scoreDict[group] = 0
scoreDict[group] += overlapDict[source][feature][classification]
totalScore = sum(scoreDict.values())
if float(totalScore)/featureDict[source][feature][2] >= float(option.min):
if not feature in matrix:
matrix[feature] = dict()
mainGroup = orderDict[1]
normGroup = orderDict[2]
if mainGroup in scoreDict:
matrix[feature][source] = float(scoreDict[mainGroup])/totalScore
else:
matrix[feature][source] = 0
# sort features:
print "Sorting features in matrix..."
sortDict = dict()
for feature in matrix.keys():
if feature.count(".") == 1:
number = int(feature.split(".")[1])
sortDict[feature] = number
 # excellent, now let's export the matrix:
print "Exporting features assignment matrix..."
print >>m_output, "\t".join(["feature"] + option.source.split(","))
features = general.valuesort(sortDict)
for feature in features:
if sorted(matrix[feature].keys()) == sorted(option.source.split(",")):
output = [feature]
for source in option.source.split(","):
output.append(matrix[feature][source])
print >>m_output, "\t".join(map(str, output))
# close output matrix:
m_output.close()
print
"""
# define annotation headers:
if option.headerDict == "auto":
annotationHeader = general.build_header_dict(i_infile)
elif option.headerDict == "bed":
annotationHeader = metrn.bedHeader
else:
annotationHeader = dict()
for entry in option.headerDict.split(","):
key, value = entry.split(":")
annotationHeader[key] = int(value)
"""
"""
# generate dataset label:
if option.mode == "map:dataset":
datasetID = metrn.labelGenerator(option.label, dataset=dataset, mode="label")
elif option.mode == "map:regions":
datasetID = dataset.replace(".bed", "")
# rename elements if necessary:
if option.rename != "OFF":
for scheme in option.rename.split(","):
target, replace = scheme.split(":")
datasetID = datasetID.replace(target, replace)
# store dataset in dictionary:
overlapDict[datasetID] = dict()
# define peak source and output
p_infile = sourcepath + dataset
p_outfile = datasetpath + dataset.replace(".bed", "_intersect.bed")
# adjust annotationHeader dictionary:
adjustHeader = dict()
adjustFactor = len(open(p_infile).readline().split("\t"))
for key in annotationHeader:
adjustHeader[key] = annotationHeader[key] + adjustFactor
print "Processing:", datasetID
if option.fraction == "OFF":
command = "intersectBed -wo -a " + p_infile + " -b " + i_tmpfile + " > " + p_outfile
os.system(command)
else:
command = "intersectBed -wo -f " + str(option.fraction) + " -a " + p_infile + " -b " + i_tmpfile + " > " + p_outfile
os.system(command)
# gather annotation overlap peak regions
overlapBed = general.build2(p_outfile, i=option.target, j=idOverlap, x="", mode="matrix", header_dict=adjustHeader, header=headerFlag, separator=":", counter=True)
#os.system("head -n 5 " + i_tmpfile)
#print
#os.system("head -n 5 " + p_infile)
#print
#os.system("head -n 5 " + p_outfile)
#print
#x = overlapBed.keys()[0]
#print overlapBed.keys()
#print overlapBed[x]
#pdb.set_trace()
# count everything ...
if option.prioritize == "OFF":
queryCount = 0
for query in queries:
if query in overlapBed:
overlapDict[datasetID][query] = sum(overlapBed[query].values())
else:
overlapDict[datasetID][query] = 0
queryCount += overlapDict[datasetID][query]
if option.others == "ON":
totalCount = 0
for entry in overlapBed:
totalCount += sum(overlapBed[entry].values())
overlapDict[datasetID]["others"] = totalCount - queryCount
# ...or preferentially assign counts to classes?
else:
if option.prioritize == "ON":
prioritize = sorted(overlapBed.keys())
else:
prioritize = option.prioritize.split(",")
queryCount = 0
invertedBed = general.dictinvert(overlapBed, mode="matrix")
for feature in invertedBed:
for query in prioritize:
if not query in overlapDict[datasetID]:
overlapDict[datasetID][query] = 0
if query in invertedBed[feature]:
overlapDict[datasetID][query] += 1
break
for query in prioritize:
if query in overlapDict[datasetID]:
queryCount += overlapDict[datasetID][query]
if option.others == "ON":
totalCount = len(invertedBed)
overlapDict[datasetID]["others"] = totalCount - queryCount
# is it necessary to determine feature counts?
if option.elsewhere == "ON":
featureCount = general.countLines(p_infile)
overlapDict[datasetID]["others"] = featureCount - queryCount
#if True:
# featureCount = general.countLines(p_infile)
# invertedBed = general.dictinvert(overlapBed, mode="matrix")
# feature = invertedBed.keys()[0]
# print featureCount, len(invertedBed), queryCount
# print feature
# print invertedBed[feature]
# pdb.set_trace()
# update queries to include "others" if necessary:
if option.others == "ON":
queries.append("others")
# check completeness (and fill missing):
for datasetID in sorted(overlapDict.keys()):
for query in queries:
if not query in overlapDict[datasetID]:
overlapDict[datasetID][query] = 0
# score datasets with highest promoter ratio:
if option.reference != "OFF":
referenceDict = dict()
for datasetID in sorted(overlapDict.keys()):
values = list()
for query in queries:
values.append(overlapDict[datasetID][query])
if query == option.reference:
reference = overlapDict[datasetID][query]
if sum(values) != 0:
referenceDict[datasetID] = float(reference)/sum(values)
else:
referenceDict[datasetID] = 0
datasetIDs = general.valuesort(referenceDict)
datasetIDs.reverse()
elif option.order != "OFF":
datasetIDs = option.order.split(",")
else:
datasetIDs = sorted(overlapDict.keys())
# export per factor-count feature-type summary:
print
print "Exporting datasets as ranked by overlap with reference:"
print
sv_sumfile = s_outfile.replace("_compiled", "_compiled_dataset_summary_values")
sn_sumfile = s_outfile.replace("_compiled", "_compiled_dataset_summary_normal")
sv_output = open(sv_sumfile, "w")
sn_output = open(sn_sumfile, "w")
print >>sv_output, "\t".join(["dataset"] + queries)
print >>sn_output, "\t".join(["dataset"] + queries)
rank = 1
for datasetID in datasetIDs:
print "#" + str(rank), ":", datasetID
values, normalized = list(), list()
for query in queries:
if query in overlapDict[datasetID]:
values.append(overlapDict[datasetID][query])
else:
values.append(0)
for value in values:
if sum(values) > 0:
normalized.append(float(value)/sum(values))
else:
normalized.append(0)
print >>sv_output, "\t".join(map(str, [datasetID] + values))
print >>sn_output, "\t".join(map(str, [datasetID] + normalized))
rank += 1
sv_output.close()
sn_output.close()
print
print "Removing temporary files..."
command = "rm -rf " + bindingpath + "*.tmp"
os.system(command)
command = "rm -rf " + datasetpath + "*.tmp"
os.system(command)
command = "rm -rf " + annotationspath + "*.tmp"
os.system(command)
print
# close report if necessary:
#if option.report == "ON":
# r_output.close()
"""
# make pairwise overlaps matrix from Alan's format:
elif option.mode == "pairwise":
# specify input header:
inHeader = {
"species.i" : 0,
"factor.i" : 1,
"context.i" : 2,
"matched" : 3,
"enhancer.i" : 4,
"0:500.i" : 5,
"501:1000.i" : 6,
"1001:2000.i" : 7,
"2001:10000.i" : 8,
"others.i" : 9,
"species.j" : 10,
"factor.j" : 11,
"context.j" : 12,
"flag" : 13,
"enhancer.j" : 14,
"0:500.j" : 15,
"501:1000.j" : 16,
"1001:2000.j" : 17,
"2001:10000.j" : 18,
"others.j" : 19
}
# species conversion:
species = {
"Human" : "hs",
"Worm" : "ce",
"Fly" : "dm"
}
# load peak counts:
indict = general.build2(path_dict[option.source] + option.infile, header_dict=inHeader)
# generate output file:
m_outfile = summarypath + "mapbinding_" + option.peaks + "_pairwise_dataset_summary_matrix"
m_output = open(m_outfile, "w")
# get all ortholog comparisons:
print
print "Loading ortholog comparison vectors..."
inlines = open(path_dict[option.source] + option.infile).readlines()
ivectors, jvectors = dict(), dict()
k = 1
for inline in inlines:
initems = inline.strip().split("\t")
ivector, jvector = initems[0:10], initems[10:20]
ivectors[k] = ivector
jvectors[k] = jvector
k += 1
#print initems
#print ivector
#print jvector
#pdb.set_trace()
#print k
# generate all pairwise comparisons:
print "Generating non-ortholog comparison vectors..."
for i in ivectors:
for j in jvectors:
if i == j:
orthologFlag = "1"
else:
orthologFlag = "0"
output = ivectors[i] + jvectors[j] + [orthologFlag]
ivector, jvector = map(int, output[4:10]), map(int, output[14:20])
correlation, corPvalue = pearsonr(ivector, jvector)
output.append(str(correlation))
print >>m_output, "\t".join(output)
"""
n_outfile = summarypath + "mapbinding_" + option.peaks + "_pairwise_dataset_summary_normal"
v_outfile = summarypath + "mapbinding_" + option.peaks + "_pairwise_dataset_summary_values"
n_output = open(n_outfile, "w")
v_output = open(v_outfile, "w")
n_output.close()
v_output.close()
"""
# close output files:
m_output.close()
print
# convert overlaps from Alan's format to mine:
elif option.mode == "convert":
# specify input header:
inHeader = {
"organism" : 0,
"factor" : 1,
"context" : 2,
"matched" : 3,
"enhancer" : 4,
"0:500" : 5,
"501:1000" : 6,
"1001:2000" : 7,
"2001:10000" : 8,
"others" : 9
}
# species conversion:
species = {
"Human" : "hs",
"Worm" : "ce",
"Fly" : "dm"
}
# load peak counts:
indict = general.build2(path_dict[option.source] + option.infile, header_dict=inHeader)
# generate output file:
n_outfile = summarypath + "mapbinding_" + option.peaks + "_convert_dataset_summary_normal"
v_outfile = summarypath + "mapbinding_" + option.peaks + "_convert_dataset_summary_values"
n_output = open(n_outfile, "w")
v_output = open(v_outfile, "w")
# print output headers:
columns = ["organism", "factor", "context", "matched", "dataset", "0:500", "501:1000", "1001:2000", "2001:10000", "others", "enhancer"]
print >>n_output, "\t".join(columns)
print >>v_output, "\t".join(columns)
# parse loaded lines:
for inline in indict:
total = 0
for column in indict[inline]:
if not column in ["organism", "factor", "context", "matched"]:
total += int(indict[inline][column])
dataset = indict[inline]["factor"] + " (" + indict[inline]["context"] + ")"
normal, values = list(), list()
for column in columns:
if column == "dataset":
normal.append(dataset)
values.append(dataset)
elif column == "organism":
normal.append(species[indict[inline][column]])
values.append(species[indict[inline][column]])
elif column in ["factor", "context", "matched"]:
normal.append(indict[inline][column])
values.append(indict[inline][column])
else:
normal.append(float(indict[inline][column])/total)
values.append(indict[inline][column])
print >>n_output, "\t".join(map(str, normal))
print >>v_output, "\t".join(map(str, values))
# close output files:
n_output.close()
v_output.close()
 # aggregate overlaps from previous outputs:
elif option.mode == "aggregate":
# generate output file:
n_outfile = summarypath + "mapbinding_" + option.peaks + "_compiled_dataset_summary_normal"
v_outfile = summarypath + "mapbinding_" + option.peaks + "_compiled_dataset_summary_values"
n_output = open(n_outfile, "w")
v_output = open(v_outfile, "w")
start = True
print
print "Loading binding trends..."
for source in option.source.split(","):
print source
n_inlines = open(path_dict["binding"] + source + "/" + option.name + "/summary/mapbinding_" + source + "_compiled_dataset_summary_normal").readlines()
v_inlines = open(path_dict["binding"] + source + "/" + option.name + "/summary/mapbinding_" + source + "_compiled_dataset_summary_values").readlines()
n_header = n_inlines.pop(0)
v_header = v_inlines.pop(0)
organism, selection, classes, context, peaks = source.split("_")
if start:
print >>n_output, "\t".join(["organism", "factor", "context", "matched"] + n_header.strip().split("\t"))
print >>v_output, "\t".join(["organism", "factor", "context", "matched"] + v_header.strip().split("\t"))
start = False
for n_inline in n_inlines:
n_items = n_inline.strip().split("\t")
factor = n_items.pop(0)
dataset = factor + " (" + context + ")"
print >>n_output, "\t".join([organism, factor, context, "1", dataset] + n_items)
for v_inline in v_inlines:
v_items = v_inline.strip().split("\t")
factor = v_items.pop(0)
dataset = factor + " (" + context + ")"
print >>v_output, "\t".join([organism, factor, context, "1", dataset] + v_items)
print
# close output files:
n_output.close()
v_output.close()
# coverage saturation mode:
elif option.mode == "saturation":
# update output path:
regionspath = saturationpath + "regions/"
numberspath = saturationpath + "numbers/"
resultspath = saturationpath + "results/"
general.pathGenerator(regionspath)
general.pathGenerator(numberspath)
general.pathGenerator(resultspath)
# update peak path and load input peak files:
peakspath = peakspath + option.peaks + "/"
peakfiles = os.listdir(peakspath)
# gather previous results:
computedfiles = os.listdir(numberspath)
# start 'em up:
print
print "Randomly sampling peak files:"
regionsDict, coverageDict = dict(), dict()
for k in range(1, len(peakfiles) + 1):
print "Processing:", k, "files..."
# generate k-files index tag:
depth = "depth" + general.indexTag(k, len(peakfiles))
# sampling iterations per k number of peaks:
for n in range(1, int(option.parameters) + 1):
# generate iteration index tag:
iteration = depth + "-n" + general.indexTag(n, int(option.parameters))
# randomly sample peaks:
samples = random.sample(peakfiles, k)
# temporary output file:
computedfile = "computeCoverage_" + iteration + ".txt"
# compute total regions and genomic coverage:
if not computedfile in computedfiles:
computeCoverage(samples, peakspath, regionspath, numberspath + computedfile)
# load total regions and genomic coverage:
resultsDict = general.build2(numberspath + computedfile)
if not k in coverageDict:
coverageDict[k] = list()
regionsDict[k] = list()
coverageDict[k].append(int(resultsDict[".1"]["bases"]))
regionsDict[k].append(int(resultsDict[".1"]["regions"]))
# export the data:
print "Exporting regions and coverage per number of samples..."
f_output = open(resultspath + "mapbinding_covered_report.txt", "w")
print >>f_output, "\t".join(["index", "regions.avg", "regions.med", "regions.std", "coverage.avg", "coverage.med", "coverage.std"])
for k in sorted(coverageDict.keys()):
output = [k]
output.append(numpy.mean(regionsDict[k]))
output.append(numpy.median(regionsDict[k]))
output.append(numpy.std(regionsDict[k]))
output.append(numpy.mean(coverageDict[k]))
output.append(numpy.median(coverageDict[k]))
output.append(numpy.std(coverageDict[k]))
print >>f_output, "\t".join(map(str, output))
f_output.close()
print
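# --- Editor's sketch (hypothetical helper, not part of this script) ---
# The saturation loop above draws k of the peak files at random,
# `option.parameters` times per depth, and summarizes covered bases per
# draw. A minimal standalone equivalent, assuming `coverage_of(files)`
# returns the number of bases covered by the union of the given files:
def saturation_curve(peakfiles, coverage_of, iterations=10, seed=None):
    import random
    import numpy
    rng = random.Random(seed)
    curve = dict()
    for k in range(1, len(peakfiles) + 1):
        draws = [coverage_of(rng.sample(peakfiles, k)) for _ in range(iterations)]
        curve[k] = (numpy.mean(draws), numpy.median(draws), numpy.std(draws))
    return curve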
# coverage fraction mode:
elif option.mode == "fractional":
# specify temporary file:
completefile = peakspath + "mappeaks_" + option.peaks + "_complete.bed"
queryfile = fractionalpath + "mapcells_query_" + option.infile.replace(".nh.bed", "").replace(".bed", "") + ".bed"
overlapfile = fractionalpath + "mapcells_overlap_" + option.infile.replace(".nh.bed", "").replace(".bed", "") + ".bed"
# update query file path:
print
print "Pruning input file..."
features = list()
featureDict = dict()
inputfile = path_dict[option.source] + option.infile
q_output = open(queryfile, "w")
for inline in open(inputfile).readlines():
chrm, start, stop, feature, score, strand = inline.strip().split("\t")[:6]
parts = feature.split(".")
featurette = ".".join(parts[:-1])
if not featurette in featureDict:
featureDict[featurette] = list()
if int(start) > 0 and int(stop) > 0:
print >>q_output, "\t".join([chrm, start, stop, featurette, score, strand])
featureDict[featurette].append(feature)
features.append(feature)
q_output.close()
# load entry regions:
print "Loading query regions.."
queryHits = list()
inlines = open(queryfile).readlines()
for inline in inlines:
queryHits.append(inline.strip().split("\t")[3])
queryHits = list(set(queryHits))
# execute overlap function:
print "Determining overlap with queries..."
command = "intersectBed -a " + queryfile + " -b " + completefile + " > " + overlapfile
os.system(command)
# examine overlaps:
overlapHits = list()
inlines = open(overlapfile).readlines()
for inline in inlines:
overlapHits.append(inline.strip().split("\t")[3])
overlapHits = list(set(overlapHits))
# show fractional results:
print "Query inputs:", len(queryHits)
print "Query overlaps:", len(overlapHits)
print "Query fraction:", round(100*float(len(overlapHits))/len(queryHits), 2), "%"
print
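# --- Editor's sketch (assumption: intervals given as (chrom, start, end[, name]) tuples) ---
# The fraction printed above is |distinct query names with >=1 bp overlap|
# divided by |distinct query names|; intersectBed does the heavy lifting.
# A naive pure-Python equivalent of that bookkeeping (O(n*m), for checking):
def overlap_fraction(queries, peaks):
    hits = set()
    for chrm, qstart, qend, name in queries:
        for pchrm, pstart, pend in peaks:
            if chrm == pchrm and qstart < pend and pstart < qend:
                hits.add(name)
                break
    names = set(q[3] for q in queries)
    return 100.0 * len(hits) / len(names)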
# occupancy (density) mode:
elif option.mode == "density":
# define input file:
compiledfile = peakspath + "mappeaks_" + option.peaks + "_compiled.bed"
# define output file:
densityfile = densitypath + "mapbinding_density_" + option.peaks + "_report.txt"
# load density scores:
print
print "Loading occupancy scores..."
occupancyDict = dict()
indata = open(compiledfile)
inline = indata.readline()
while inline:
chrm, start, end, region, occupancy, strand, density, datasetCounts, factorCounts, contextCounts, details = inline.strip().split("\t")
occupancy, peaks = int(occupancy), details.strip(";").split(";")
peaks.pop(0)
for peak in peaks:
dataset, peakID = peak.split(":")
if not dataset in occupancyDict:
occupancyDict[dataset] = list()
occupancyDict[dataset].append(occupancy)
inline = indata.readline()
# apply simplification and sorting:
medianDict = dict()
for dataset in occupancyDict:
medianDict[dataset] = numpy.median(occupancyDict[dataset])
datasets = general.valuesort(medianDict)
datasets.reverse()
# export median-ranked dataset information:
print "Exporting ranked occupancy scores..."
f_output = open(densityfile, "w")
k = 1
print >>f_output, "\t".join(["dataset", "factor", "context", "rank", "peaks", "occupancy.avg", "occupancy.med", "occupancy.std", "low.quartile", "top.quartile", "low.decile", "top.decile"])
for dataset in datasets:
label = metrn.labelGenerator(option.target, mode="label", dataset=dataset)
organism, strain, factor, context, institute = dataset.split("_")
output = [label, factor, context, k, len(occupancyDict[dataset])]
output.append(numpy.mean(occupancyDict[dataset]))
output.append(numpy.median(occupancyDict[dataset]))
output.append(numpy.std(occupancyDict[dataset]))
output.append(Quantile(occupancyDict[dataset], 0.25))
output.append(Quantile(occupancyDict[dataset], 0.75))
output.append(Quantile(occupancyDict[dataset], 0.10))
output.append(Quantile(occupancyDict[dataset], 0.90))
print >>f_output, "\t".join(map(str, output))
k += 1
f_output.close()
print
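# --- Editor's note: Quantile() used above is defined elsewhere in this
# script (outside the excerpt shown here). An assumed-equivalent built on
# numpy, with q given as a fraction in [0, 1]:
def quantile(values, q):
    import numpy
    return numpy.percentile(values, 100.0 * q)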
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
#python mapBinding.py --path ~/meTRN --organism ce --mode fractional --peaks ce_selection_reg_cx_raw --infile in2shape_ce_wormbased_TSS_gx_slopbed_up1000_dn100.nh.bed --source annotations --name promoter.regions
#python mapBinding.py --path ~/meTRN --organism ce --mode fractional --peaks ce_selection_reg_cx_raw --infile in2shape_ce_wormbased_TSS_gx_slopbed_up2000_dn200.nh.bed --source annotations --name promoter.regions
#python mapBinding.py --path ~/meTRN --organism ce --mode fractional --peaks ce_selection_reg_cx_raw --infile in2shape_ce_wormbased_TSS_gx_slopbed_up3000_dn300.nh.bed --source annotations --name promoter.regions
#python mapBinding.py --path ~/meTRN --organism ce --mode fractional --peaks ce_selection_reg_cx_raw --infile in2shape_ce_wormbased_TSS_gx_slopbed_up4000_dn400.nh.bed --source annotations --name promoter.regions
#python mapBinding.py --path ~/meTRN --organism ce --mode fractional --peaks ce_selection_reg_cx_raw --infile in2shape_ce_wormbased_TSS_gx_slopbed_up5000_dn500.nh.bed --source annotations --name promoter.regions
#python mapBinding.py --path ~/meTRN --organism ce --mode density --peaks ce_selection_reg_cx_raw --name coverage --target factor.context
|
claraya/meTRN
|
python/mapBinding.py
|
Python
|
mit
| 63,606
|
[
"BWA",
"Bowtie"
] |
2695300286f15aefa3170d274a3dbe25d9f6d04bdd6c284b1ed3e76106236b0b
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
__all__ = [
'HydrogenBondAutoCorrel', 'find_hydrogen_donors',
]
from .hbond_autocorrel import HydrogenBondAutoCorrel, find_hydrogen_donors
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/analysis/hbonds/__init__.py
|
Python
|
gpl-2.0
| 1,200
|
[
"MDAnalysis"
] |
27584c76d2344cf912540b9da75e26426b6cd188cb3babb60e8c1d314c75c7ca
|
"""
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1., 0., 0., 0.])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
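# --- Editor's sketch (illustrative only, not part of numpy): the
# roots -> coefficients -> roots round trip promised by the docstring.
def _poly_roundtrip_demo():
    import numpy as np
    zeros = np.array([2.0, -1.0, 0.5])
    c = np.poly(zeros)                       # monic, highest power first
    recovered = np.sort(np.roots(c))
    assert np.allclose(recovered, np.sort(zeros))
    return c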
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
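# --- Editor's sketch (illustrative only, not part of numpy): the companion
# matrix built above, written out for a cubic whose roots are 1, 2, 3.
def _companion_demo():
    import numpy as np
    p = np.array([1.0, -6.0, 11.0, -6.0])    # (x-1)(x-2)(x-3)
    A = np.diag(np.ones(len(p) - 2), -1)     # ones on the subdiagonal
    A[0, :] = -p[1:] / p[0]                  # first row from coefficients
    eig = np.sort(np.linalg.eigvals(A).real)
    assert np.allclose(eig, [1.0, 2.0, 3.0])
    return A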
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
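# --- Editor's sketch (illustrative only, not part of numpy): the defining
# property polyder(polyint(p)) == p, with k seeding the constant term.
def _polyint_demo():
    import numpy as np
    p = np.array([6.0, 2.0])                 # 6x + 2
    P = np.polyint(p, m=1, k=5.0)            # 3x**2 + 2x + 5
    assert np.allclose(P, [3.0, 2.0, 5.0])
    assert np.allclose(np.polyder(P), p)
    return P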
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([0])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
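# --- Editor's sketch (illustrative only, not part of numpy): checking the
# derivative against a central finite difference.
def _polyder_demo():
    import numpy as np
    p = np.poly1d([1.0, 1.0, 1.0, 1.0])      # x**3 + x**2 + x + 1
    d = np.polyder(p)                        # 3x**2 + 2x + 1
    x, h = 2.0, 1e-6
    fd = (p(x + h) - p(x - h)) / (2.0 * h)
    assert abs(d(x) - fd) < 1e-5
    return d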
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error in the order `deg`, `deg-1`, ... `0`.
The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
method is recommended for new code as it is more stable numerically. See
the documentation of the method for more information.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
covariance matrix. By default, the covariance are scaled by
chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
to be unreliable except in a relative sense and everything is scaled
such that the reduced chi2 is unity. This scaling is omitted if
``cov='unscaled'``, as is relevant for the case that the weights are
1/sigma**2, with sigma known to be a reliable estimate of the
uncertainty.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals is sum of squared residuals
of the least-squares fit, the effective rank of the scaled Vandermonde
coefficient matrix, its singular values, and the specified value of
`rcond`. For more details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> import warnings
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179 # may vary
>>> p(3.5)
-0.34732142857143039 # may vary
>>> p(10)
22.579365079365115 # may vary
High-order polynomials may oscillate wildly:
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', np.RankWarning)
... p30 = np.poly1d(np.polyfit(x, y, 30))
...
>>> p30(4)
-0.80000000000000204 # may vary
>>> p30(5)
-0.99999999999999445 # may vary
>>> p30(4.5)
-0.10547061179440398 # may vary
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
if cov == "unscaled":
fac = 1
else:
if len(x) <= order:
raise ValueError("the number of data points must exceed order "
"to scale the covariance matrix")
# note, this used to be: fac = resids / (len(x) - order - 2.0)
# it was decided that the "- 2" (originally justified by "Bayesian
# uncertainty analysis") is not what the user expects
# (see gh-11196 and gh-11197)
fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
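# --- Editor's sketch (illustrative only, not part of numpy): the column
# scaling used above equilibrates the Vandermonde matrix, lowering its
# condition number before the lstsq solve.
def _polyfit_scaling_demo():
    import numpy as np
    x = np.linspace(0.0, 10.0, 50)
    lhs = np.vander(x, 6)                    # degree-5 design matrix
    scale = np.sqrt((lhs * lhs).sum(axis=0))
    assert np.linalg.cond(lhs / scale) < np.linalg.cond(lhs)
    return scale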
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
If `x` is another polynomial then the composite polynomial ``p(x(t))``
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
If `x` is a subtype of `ndarray` the return value will be of the same type.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([76])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([76])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
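# --- Editor's sketch (illustrative only, not part of numpy): the loop above
# is Horner's scheme; written standalone and checked against polyval.
def _horner_demo():
    import numpy as np
    def horner(coeffs, x):
        acc = 0.0
        for c in coeffs:
            acc = acc * x + c
        return acc
    p = [3.0, 0.0, 1.0]                      # 3x**2 + 1
    assert horner(p, 5.0) == np.polyval(p, 5.0) == 76.0
    return horner(p, 5.0)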
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
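# --- Editor's sketch (illustrative only, not part of numpy): coefficient
# multiplication is discrete convolution, which is what the implementation
# above relies on.
def _polymul_demo():
    import numpy as np
    a, b = [1, 2, 3], [9, 5, 1]
    assert np.array_equal(np.polymul(a, b), np.convolve(a, b))
    return np.convolve(a, b)                 # [ 9, 23, 38, 17,  3]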
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([1.5 , 1.75]), array([0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
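# --- Editor's sketch (illustrative only, not part of numpy): verifying the
# long-division identity u == q*v + r for the loop above.
def _polydiv_demo():
    import numpy as np
    u = np.array([3.0, 5.0, 2.0])            # 3x**2 + 5x + 2
    v = np.array([2.0, 1.0])                 # 2x + 1
    q, r = np.polydiv(u, v)
    assert np.allclose(np.polyadd(np.polymul(q, v), r), u)
    return q, r                              # ([1.5, 1.75], [0.25])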
_poly_mat = re.compile(r"\*\*([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
@set_module('numpy')
class poly1d:
"""
A one-dimensional polynomial class.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# our internal _coeffs property need to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0], dtype=c_or_r.dtype)
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
pbrod/numpy
|
numpy/lib/polynomial.py
|
Python
|
bsd-3-clause
| 43,813
|
[
"Gaussian"
] |
6bcfc441900e2b714d0e460cf6b17823cfe0352ad88fd1d09e745829ed304a08
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used, the operands may
be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
nditer must be used as a context manager or the nditer.close
method must be called before using the result. The temporary
data will be written back to the original data when the `__exit__`
function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
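Examples
--------
A minimal sketch; for a small contiguous array the values then
arrive as a single one-dimensional chunk:
>>> it = np.nditer(np.arange(6))
>>> it.enable_external_loop()
>>> next(it)
array([0, 1, 2, 3, 4, 5])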
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style do-while pattern.
For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
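Examples
--------
A minimal sketch of the do-while pattern on a two-element array:
>>> it = np.nditer(np.arange(2))
>>> it.iternext()
True
>>> it.iternext()
False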
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
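Examples
--------
An illustrative sketch; removing an axis shrinks the iteration space:
>>> it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
>>> it.itersize
6
>>> it.remove_axis(0)
>>> it.itersize
3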
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
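Examples
--------
A minimal sketch:
>>> it = np.nditer(np.arange(4).reshape(2, 2), flags=['multi_index'])
>>> it.has_multi_index
True
>>> it.remove_multi_index()
>>> it.has_multi_index
False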
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
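Examples
--------
A minimal sketch; exhausting the iterator and starting over:
>>> it = np.nditer(np.arange(3))
>>> [int(x) for x in it]
[0, 1, 2]
>>> it.reset()
>>> it.finished
False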
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer.
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
See Also
--------
:ref:`nditer-context-manager`
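Examples
--------
A minimal sketch; an explicit `close` as an alternative to the
context manager:
>>> a = np.arange(3)
>>> it = np.nditer(a, [], [['readwrite']])
>>> for x in it:
...     x[...] = 2 * x
>>> it.close()
>>> a
array([0, 2, 4])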
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
If this argument is not provided, `fromstring` falls back on the
behaviour of `frombuffer` after encoding unicode string inputs as
either utf-8 (python 3), or the default encoding (python 2).
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""")
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : bool
If True, trailing whitespace at the end of the strings is removed
before the comparison.
Returns
-------
out : ndarray
The output array of type bool with the same shape as `a` and `b`.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array.
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
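A short `count` sketch, reading only the first three items:
>>> np.fromiter((x*x for x in range(5)), dtype=float, count=3)
array([ 0., 1., 4.])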
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point round-off,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
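Examples
--------
An illustrative sketch; pass ``None`` to restore the default:
>>> np.set_string_function(lambda a: 'array of size %d' % a.size)
>>> repr(np.arange(3))
'array of size 3'
>>> np.set_string_function(None)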
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as the other. Previously it always returned the input string
dtype, even if it wasn't long enough to store the maximum integer/float
value converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native Python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
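A sketch of a chained view; NumPy collapses the chain, so the base
of a view of a view is the original array:
>>> z = y[1:]
>>> z.base is x
True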
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
this array, at which point the base array will be updated with the
contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays.
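Examples
--------
A minimal sketch of dictionary-style and attribute-style access:
>>> a = np.arange(3)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True
>>> a.flags['C']  # short flag name, dictionary access only
True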
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: incompatible shape for a non-contiguous array
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
Optional pickle version. If omitted, defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error if the string
dtype to cast to is not long enough in 'safe' casting mode to hold the max
value of the integer/float array being cast. Previously the casting
was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
Traceback (most recent call last):
...
UnicodeDecodeError: ...
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible).
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data, or if references or views to it
exist, and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
>>> a = np.array([1, 2])
>>> list(a)
[1, 2]
>>> a.tolist()
[1, 2]
However, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2-D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
Calling ufuncs:
===============
op(*x[, out], where=True, **kwargs)
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use `None` for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone. Note that if
an uninitialized return array is created via the default ``out=None``,
then the elements where the values are False will remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is `None`, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `a`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for the keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(1, len(a))`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
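Examples
--------
(illustrative values only; alignment is platform- and compiler-dependent)
>>> np.dtype('i4').alignment
4
>>> np.dtype('f8').alignment
8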
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and is not a datatype description compatible with `np.dtype`.
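Examples
--------
(a little-endian build is assumed for the byte-order codes shown)
>>> np.dtype('f8').descr
[('', '<f8')]
>>> np.dtype([('a', 'i4'), ('b', 'S3')]).descr
[('a', '<i4'), ('b', '|S3')]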
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
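Examples
--------
(a small sketch of the two cases)
>>> np.dtype(object).hasobject
True
>>> np.dtype('f8').hasobject
False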
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
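Examples
--------
(sketch; which byte-order code is the swapped one depends on the platform)
>>> np.dtype('i2').isnative
True
>>> import sys
>>> swapped = '>' if sys.byteorder == 'little' else '<'
>>> np.dtype(swapped + 'i2').isnative
False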
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
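Examples
--------
(sketch contrasting a packed struct with an aligned one)
>>> np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'f8']}).isalignedstruct
False
>>> np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'f8']}, align=True).isalignedstruct
True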
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
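Examples
--------
(two representative cases)
>>> np.dtype('i4').itemsize
4
>>> np.dtype('S10').itemsize  # flexible type: size chosen at construction
10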
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
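Examples
--------
(a few representative codes from the table above)
>>> np.dtype('i4').kind
'i'
>>> np.dtype('f8').kind
'f'
>>> np.dtype([('field1', 'f8')]).kind
'V'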
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
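Examples
--------
(two sized types for illustration)
>>> np.dtype(np.int16).name
'int16'
>>> np.dtype(np.float64).name
'float64'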
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
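Examples
--------
(the exact numbers are implementation details; shown for illustration only)
>>> np.dtype(np.bool_).num
0
>>> np.dtype('f8').num
12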
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
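Examples
--------
(a sub-array dtype versus a plain scalar dtype)
>>> np.dtype(('i4', (2, 3))).shape
(2, 3)
>>> np.dtype('i4').shape
()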
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
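Examples
--------
(the sub-array case versus the plain case)
>>> np.dtype(('i4', (2, 3))).ndim
2
>>> np.dtype('i4').ndim
0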
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
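Examples
--------
(a sub-array dtype versus a plain dtype)
>>> dt = np.dtype(('i2', (2,)))
>>> dt.subdtype
(dtype('int16'), (2,))
>>> np.dtype('i2').subdtype is None
True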
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of an array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, i.e. when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which all numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many of the
resulting attributes being either read-only or irrelevant for scalars.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
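Examples
--------
(illustrative sketch: the scalar's bits are reinterpreted in the swapped
byte order, so the numeric value changes)
>>> np.int16(1).newbyteorder()
256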
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in their range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
def type_aliases_gen():
for alias, doc in aliases:
try:
alias_type = getattr(_numerictypes, alias)
except AttributeError:
# The set of aliases that actually exist varies between platforms
pass
else:
yield (alias_type, alias, doc)
return list(type_aliases_gen())
possible_aliases = numeric_type_aliases([
('int8', '8-bit signed integer (-128 to 127)'),
('int16', '16-bit signed integer (-32768 to 32767)'),
('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
('uint8', '8-bit unsigned integer (0 to 255)'),
('uint16', '16-bit unsigned integer (0 to 65535)'),
('uint32', '32-bit unsigned integer (0 to 4294967295)'),
('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
('float64', '64-bit-precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
('float96', '96-bit extended-precision floating-point number type'),
('float128', '128-bit extended-precision floating-point number type'),
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
o = getattr(_numerictypes, obj)
character_code = dtype(o).char
canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
docstring = """
{doc}
Character code: ``'{character_code}'``.
{canonical_name_doc}{alias_doc}
""".format(doc=doc.strip(), character_code=character_code,
canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
add_newdoc('numpy.core.numerictypes', obj, docstring)
add_newdoc_for_scalar_type('bool_', ['bool8'],
"""
Boolean type (True or False), stored as a byte.
""")
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
add_newdoc_for_scalar_type('int_', [],
"""
Signed integer type, compatible with Python `int` and C ``long``.
""")
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, compatible with C ``unsigned long``.
""")
add_newdoc_for_scalar_type('ulonglong', [],
"""
Unsigned integer type, compatible with C ``unsigned long long``.
""")
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
add_newdoc_for_scalar_type('double', ['float_'],
"""
Double-precision floating-point number type, compatible with Python `float`
and C ``double``.
""")
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
"""
Extended-precision floating-point number type, compatible with C
``long double`` but not necessarily with IEEE 754 quadruple-precision.
""")
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
"""
Complex number type composed of two single-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
"""
Complex number type composed of two double-precision floating-point
numbers, compatible with Python `complex`.
""")
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
"""
Complex number type composed of two extended-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
"""
{ftype}.as_integer_ratio() -> (int, int)
Return a pair of integers, whose ratio is exactly equal to the original
floating point number, and with a positive denominator.
Raise OverflowError on infinities and a ValueError on NaNs.
>>> np.{ftype}(10.0).as_integer_ratio()
(10, 1)
>>> np.{ftype}(0.0).as_integer_ratio()
(0, 1)
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
|
shoyer/numpy
|
numpy/core/_add_newdocs.py
|
Python
|
bsd-3-clause
| 202,407
|
[
"Brian"
] |
05372f6ac3f1e17a8d569414e8709de8893cb50e5e1532be13cca6005501c52f
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Roger Philibert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from pygrl.browser.browsers import DomainBrowser, APIBrowser
from pygrl.browser.pages import HTMLPage
from pygrl.browser.profiles import IPhone
from pygrl.exceptions import BrowserIncorrectPassword
import json
__all__ = ['TinderBrowser', 'FacebookBrowser']
class FacebookBrowser(DomainBrowser):
BASEURL = 'https://graph.facebook.com'
CLIENT_ID = "464891386855067"
access_token = None
info = None
def login(self, username, password):
self.location('https://www.facebook.com/dialog/oauth?client_id=%s&redirect_uri=https://www.facebook.com/connect/login_success.html&scope=basic_info,email,public_profile,user_about_me,user_activities,user_birthday,user_education_history,user_friends,user_interests,user_likes,user_location,user_photos,user_relationship_details&response_type=token' % self.CLIENT_ID)
page = HTMLPage(self, self.response)
form = page.get_form('//form[@id="login_form"]')
form['email'] = username
form['pass'] = password
form['persistent'] = 1
for script in page.doc.xpath('//script'):
m = re.search('"_js_datr","([^"]+)"', script.text or '')
if m:
self.session.cookies.set('_js_datr', m.group(1))
form.submit(allow_redirects=False)
if 'Location' not in self.response.headers:
raise BrowserIncorrectPassword()
self.location(self.response.headers['Location'])
m = re.search('access_token=([^&]+)&', self.url)
if m:
self.access_token = m.group(1)
else:
raise SystemExit("\n########\nYou need to allow Tinder to access your profile first, please visit :\n\n{u}\n########\n".format(u=self.url))
self.info = self.request('/me')
def request(self, url, *args, **kwargs):
url += '?access_token=' + self.access_token
self.location(self.absurl(url, base=True), *args, **kwargs)
return json.loads(self.response.content)
class TinderBrowser(APIBrowser):
BASEURL = 'https://api.gotinder.com/'
PROFILE = IPhone('Tinder/3.0.2')
recs = []
def __init__(self, facebook, *args, **kwargs):
super(TinderBrowser, self).__init__(*args, **kwargs)
self.facebook = facebook
me = self.request('/auth', data={'facebook_id': facebook.info['id'], 'facebook_token': facebook.access_token})
self.session.headers['Authorization'] = 'Token token="%s"' % me['token']
self.session.headers['X-Auth-Token'] = me['token']
self.my_id = me['user']['_id']
self.my_name = me['user']['name']
def get_threads(self):
resp = self.request('/updates', data={'last_activity_date': '2014-05-01T06:13:16.971Z'})
return sorted(resp['matches'], key=lambda m: m['last_activity_date'], reverse=True)
def post_message(self, match_id, content):
self.request('/user/matches/%s' % match_id, data={'message': content})
def update_recs(self):
resp = self.request('/user/recs')
try:
self.recs = resp['results']
except KeyError:
self.recs = []
def like_profile(self):
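# The return value appears to be used as a sleep interval (in seconds)
# before the next call: 1 while recs remain, 60 when empty, 600 if rate-limited.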
if len(self.recs) == 0:
self.update_recs()
if len(self.recs) == 0:
return 60
profile = self.recs.pop()
if 'tinder_rate_limited' in profile['_id']:
self.logger.info(profile['bio'])
return 600
resp = self.request('/like/%s' % profile['_id'])
if resp['match']:
self.logger.error('Match with %s!' % profile['name'])
else:
self.logger.info('Liked %s (%r)' % (profile['name'], profile['common_likes']))
if len(self.recs) > 0:
return 1
else:
return 60
|
blagarde/pygrl
|
tinder/browser.py
|
Python
|
gpl-3.0
| 4,501
|
[
"VisIt"
] |
5b07574273dcc6edd75f7bed1e7e1e07a42a284f105f646347a253cc849b91b8
|
from BAMF_Detect.postprocessors.common import Postprocessor, Postprocessors
import pefile
import datetime
class GetPETimes(Postprocessor):
def __init__(self):
Postprocessor.__init__(
self,
name="GetPETimes",
author="Brian Wallace (@botnet_hunter)",
date="March 14th, 2015",
description="Extracts the timestamps from PEs",
references="",
version="1.0.0.0"
)
@staticmethod
def epoch_to_string(epoch):
return datetime.datetime.fromtimestamp(epoch).strftime("%x %X")
def _do_processing(self, file_data, results):
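# Returns a tuple of (extracted timestamp metadata, unmodified file contents).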
to_return = {}
try:
times = []
pe = pefile.PE(data=file_data)
pe.FILE_HEADER.dump_dict()
# I know they parse the time out into a nice string for us, but I want uniform printing...
epoch = int(pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split(" ")[0], 16)
times.append({"name": "FILE_HEADER", "integer": epoch, "s": GetPETimes.epoch_to_string(epoch)})
to_return = {'times': times}
# usually null timestamps
for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
try:
times.append({
"name": "DIRECTORY_ENTRY_RESOURCE",
"integer": entry.directory.struct.TimeDateStamp,
"s": GetPETimes.epoch_to_string(entry.directory.struct.TimeDateStamp)})
except KeyboardInterrupt:
raise
except:
pass
for entry in pe.DIRECTORY_ENTRY_IMPORT:
try:
times.append({
"name": "DIRECTORY_ENTRY_IMPORT",
"integer": entry.struct.TimeDateStamp,
"s": GetPETimes.epoch_to_string(entry.struct.TimeDateStamp)})
except KeyboardInterrupt:
raise
except:
pass
# pe.DIRECTORY_ENTRY_RESOURCE.entries[0].directory.struct.TimeDateStamp
# pe.DIRECTORY_ENTRY_IMPORT[0].struct.TimeDateStamp
to_return = {'times': times}
except:
pass
return to_return, file_data
Postprocessors.add_postprocessor(GetPETimes())
|
bwall/bamfdetect
|
BAMF_Detect/postprocessors/get_pe_times.py
|
Python
|
mit
| 2,371
|
[
"Brian"
] |
ac2e5a984b965cacdae4811d767490acfb26c0f5f017365b736863fc1a00a466
|
#!/usr/bin/env python
from pymatgen.io.vaspio import Poscar
import cProfile
import pstats
import os
import logging
logging.basicConfig(level=logging.DEBUG)
p = Poscar.from_file("../test_files/POSCAR.LiFePO4", check_for_POTCAR=False)
s = p.structure
def test():
nn = s.get_all_neighbors(20)
print len(nn)
def chgcar_test():
from pymatgen.io.vaspio import Chgcar
c = Chgcar.from_file("../test_files/CHGCAR.noncubic")
print c.get_integrated_diff(1, 2.5, 3)
def vasprun_test():
from pymatgen.io.vaspio import Vasprun
v = Vasprun("../test_files/vasprun.xml")
print v.final_energy
def matcher_test():
p = Poscar.from_file("../test_files/POSCAR.Li2O")
s = p.structure
from pymatgen.analysis.structure_matcher import StructureMatcher
print StructureMatcher().fit(s, s)
cProfile.run('matcher_test()', 'testprof')
p = pstats.Stats('testprof')
p.sort_stats('cumulative').print_stats(20)
os.remove("testprof")
|
Dioptas/pymatgen
|
dev_scripts/profile_structure.py
|
Python
|
mit
| 955
|
[
"pymatgen"
] |
d1b5eb242db1c6257bf5143c33a990a41cbf1e46601e44c96876829f8a34e055
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""Module to largely replicate in python the psi4 libmints
CoordValue and CoordEntry classes, which were developed by
Justin M. Turney, with incremental improvements by other
psi4 developers.
"""
from __future__ import absolute_import
from __future__ import print_function
import math
import copy
from .vecutil import *
from .exceptions import *
try:
from collections import OrderedDict
except ImportError:
from .oldpymodules import OrderedDict
class CoordValue(object):
"""An abstract class to handle storage of Cartesian coordinate values, which
may be defined in terms of other variables through this mechanism, greatly
simplifying Z-matrix specification, for example.
"""
def __init__(self, fixed=False, computed=False):
# Fixed coordinate?
self.PYfixed = fixed
# Whether the current value is up to date or not
self.computed = computed
def set_fixed(self, fixed):
"""Set whether the coordinate value is fixed or not"""
self.PYfixed = fixed
def fixed(self):
"""Get whether the coordinate value is fixed or not"""
return self.PYfixed
def invalidate(self):
"""Flag the current value as outdated"""
self.computed = False
def everything(self):
print('\nCoordValue\n Fixed = %s\n Computed = %s\n\n' % (self.PYfixed, self.computed))
class NumberValue(CoordValue):
"""Specialization of CoordValue that is simply a number to be stored."""
def __init__(self, value, fixed=False):
CoordValue.__init__(self, fixed, True)
# coordinate number value
self.value = value
def compute(self):
"""Computes value of coordinate from member data"""
return self.value
def rset(self, val):
"""Resets value of coordinate if not fixed"""
if not self.PYfixed:
self.value = val
def type(self):
"""Gets specialization type of CoordValue"""
return 'NumberType'
def clone(self):
"""Returns new, independent NumberValue object"""
return copy.deepcopy(self)
def variable_to_string(self, precision):
"""Takes a CoordValue object, and returns a string for printing."""
return "%*.*f" % (precision + 5, precision, self.compute())
def everything(self):
print('\nNumberValue\n Fixed = %s\n Computed = %s\n Type = %s\n Value = %f\n FValue = %s\n\n' %
(self.PYfixed, self.computed, self.type(), self.value, self.variable_to_string(4)))
class VariableValue(CoordValue):
"""Specialization of CoordValue, where the current value depends
on the list of geometry values stored by the molecule.
"""
def __init__(self, name, geometryVariables, negate=False, fixed=False):
CoordValue.__init__(self, fixed, True)
# Name of variable
self.PYname = name
# Dictionary from molecule of variable names and values
self.geometryVariables = geometryVariables
# Whether the coordinate value is actually the negative of the variable value
self.negate = negate
def compute(self):
"""Computes value of coordinate from member data"""
vstr = self.PYname.upper()
if vstr not in self.geometryVariables:
raise IncompleteAtomError('Variable %s used in geometry specification has not been defined' % (vstr))
if self.negate:
return self.geometryVariables[vstr] * -1.0
else:
return self.geometryVariables[vstr]
def negated(self):
"""Gets whether the coordinate value is actually the negative of the variable value"""
return self.negate
def name(self):
"""Gets the name of the variable"""
return self.PYname
def rset(self, val):
"""Resets value of coordinate if not fixed"""
        if not self.PYfixed:
            # Uppercase the key so it matches the lookup in compute().
            vstr = self.PYname.upper()
            if self.negate:
                self.geometryVariables[vstr] = val * -1.0
            else:
                self.geometryVariables[vstr] = val
def type(self):
"""Gets specialization type of CoordValue"""
return 'VariableType'
def clone(self):
"""Returns new, independent VariableValue object"""
return copy.deepcopy(self)
def variable_to_string(self, precision):
"""Takes a CoordValue object, and returns a string for printing."""
if self.negate:
return '-' + self.PYname
else:
return self.PYname
def everything(self):
print('\nVariableValue\n Fixed = %s\n Computed = %s\n Type = %s\n Value = %f\n FValue = %s\n Name = %s\n Negated = %s\n Map = %s\n\n' %
(self.PYfixed, self.computed, self.type(), self.compute(), self.variable_to_string(4), self.name(), self.negated(), self.geometryVariables))
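# A usage sketch (hypothetical values, not part of the original module):
# NumberValue wraps a literal, while VariableValue defers to the molecule's
# shared variable map and honors negation.
#
# gvars = {'R': 1.4}
# r = VariableValue('r', gvars, negate=True)
# r.compute()                              # -> -1.4 (name uppercased for lookup)
# NumberValue(0.97).variable_to_string(4)  # -> '   0.9700'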
class CoordEntry(object):
"""Class to store all the attributes associated with an atom, not the
larger Molecule. Specialized into CartesianEntry and ZMatrixEntry.
"""
def __init__(self, entry_number, Z, charge, mass, symbol, label="", basis=None, shells=None):
"""Constructor"""
# Order in full atomic list
self.PYentry_number = entry_number
# Whether the coordinates have been computed
self.computed = False
# Actual cartesian coordinates of the atom
self.coordinates = [None, None, None]
# Atomic number of the atom
self.PYZ = Z
# Charge of the atom (SAD-related)
self.PYcharge = charge
# Mass of the atom
self.PYmass = mass
# Label of the atom minus any extra info (H1 => H)
self.PYsymbol = symbol
# Original label from the molecule from the input file (H1)
self.PYlabel = label
# Is this a ghost atom?
self.ghosted = False
# Different types of basis sets that can be assigned to this atom.
self.PYbasissets = basis if basis is not None else OrderedDict()
# Hash of one-atom BasisSet attached to this atom
self.PYshells = shells if shells is not None else OrderedDict()
@staticmethod
def r(a1, a2):
"""Computes the distance between two sets of coordinates"""
if len(a1) != 3 or len(a2) != 3:
raise ValidationError('ERROR: r() only defined for Vector3\n')
return distance(a1, a2)
@staticmethod
def a(a1, a2, a3):
"""Computes the angle (in rad.) between three sets of coordinates."""
if len(a1) != 3 or len(a2) != 3 or len(a3) != 3:
raise ValidationError('ERROR: a() only defined for Vector3\n')
eBA = sub(a2, a1)
eBC = sub(a2, a3)
eBA = normalize(eBA)
eBC = normalize(eBC)
costheta = dot(eBA, eBC)
if costheta > 1.0 - 1.0E-14:
costheta = 1.0
if costheta < 1.0E-14 - 1.0:
costheta = -1.0
return math.acos(costheta)
@staticmethod
def d(a1, a2, a3, a4):
"""Computes the dihedral (in rad.) between four sets of coordinates."""
if len(a1) != 3 or len(a2) != 3 or len(a3) != 3 or len(a4) != 3:
raise ValidationError('ERROR: d() only defined for Vector3\n')
eBA = sub(a2, a1)
eDC = sub(a4, a3)
eCB = sub(a3, a2)
CBNorm = norm(eCB)
DCxCB = cross(eDC, eCB)
CBxBA = cross(eCB, eBA)
return -1.0 * math.atan2(CBNorm * dot(eDC, CBxBA), dot(DCxCB, CBxBA))
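    # Worked example for the static helpers above (comments only, since this
    # sits inside the class body): with A=(1,0,0), B=(0,0,0), C=(0,1,0) and
    # D=(0,1,1), r(A, B) is 1.0, a(A, B, C) is pi/2, and d(A, B, C, D) has
    # magnitude pi/2, its sign fixed by the atan2 convention above.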
def is_computed(self):
"""Whether the current atom's coordinates are up-to-date."""
return self.computed
def is_equivalent_to(self, other):
"""Whether this atom has the same mass and ghost status as atom *other*.
Also compares basis set assignment down to nbf(), has_puream() level
with code borrowed from Robert M. Parrish's SAD guess in Psi4.
"""
if other.PYZ != self.PYZ:
return False
if other.PYmass != self.PYmass:
return False
if other.ghosted != self.ghosted:
return False
if other.PYshells is not None and self.PYshells is not None:
for bas in self.PYshells: # do we instead care only about orbital basis?
if bas in other.PYshells:
if other.PYshells[bas] != self.PYshells[bas]:
return False
#if other.PYshells[bas].nbf() != self.PYshells[bas].nbf():
# return False
#if other.PYshells[bas].nshell() != self.PYshells[bas].nshell():
# return False
#if other.PYshells[bas].nprimitive() != self.PYshells[bas].nprimitive():
# return False
#if other.PYshells[bas].max_am() != self.PYshells[bas].max_am():
# return False
#if other.PYshells[bas].max_nprimitive() != self.PYshells[bas].max_nprimitive():
# return False
#if other.PYshells[bas].has_puream() != self.PYshells[bas].has_puream():
# return False
else:
raise ValidationError("""Basis set %s set for one and not other. This shouldn't happen. Investigate.""" % (bas))
return True
def is_ghosted(self):
"""Whether the current atom is ghosted or not."""
return self.ghosted
def set_ghosted(self, gh):
"""Flag the atom as either ghost or real."""
self.ghosted = gh
def Z(self):
"""The nuclear charge of the current atom (0 if ghosted)."""
if self.ghosted:
return 0.0
else:
return self.PYZ
def charge(self):
"""The "atomic charge" of the current atom (for SAD purposes)."""
return self.PYcharge
def mass(self):
"""The atomic mass of the current atom."""
return self.PYmass
def symbol(self):
"""The atomic symbol."""
return self.PYsymbol
def label(self):
"""The atom label."""
return self.PYlabel
def entry_number(self):
"""The order in which this appears in the full atom list."""
return self.PYentry_number
def set_basisset(self, name, role='BASIS'):
"""Set the basis for this atom
* @param type Keyword from input file, basis, ri_basis, etc.
* @param name Value from input file
"""
self.PYbasissets[role] = name
def basisset(self, role='BASIS'):
"""Returns the basis name for the provided type.
* @param type Keyword from input file.
* @returns the value from input.
"""
try:
return self.PYbasissets[role]
        except KeyError:
raise ValidationError('CoordEntry::basisset: Basisset not set for %s and type of %s' % \
(self.PYlabel, role))
def basissets(self):
"""Returns basisset to atom map"""
return self.PYbasissets
def set_shell(self, bshash, key='BASIS'):
"""Set the hash for this atom
* @param key Keyword from input file, basis, ri_basis, etc.
* @param bshash hash string of one-atom BasisSet
"""
self.PYshells[key] = bshash
def shell(self, key='BASIS'):
"""Returns the hash for the provided type.
* @param type Keyword from input file.
* @returns the hash string for basis.
"""
try:
return self.PYshells[key]
except (ValueError, KeyError):
raise ValidationError('CoordEntry::shells: Shells not set for %s and type of %s' % \
(self.PYlabel, key))
def shells(self):
"""Returns shells sets to atom map"""
return self.PYshells
def everything(self):
print('\nCoordEntry\n Entry Number = %d\n Computed = %s\n Z = %d\n Charge = %f\n Mass = %f\n Symbol = %s\n Label = %s\n Ghosted = %s\n Coordinates = %s\n Basissets = %s\n\n Shells = %s\n\n' %
(self.entry_number(), self.is_computed(), self.Z(), self.charge(),
self.mass(), self.symbol(), self.label(), self.is_ghosted(),
self.coordinates, self.PYbasissets, self.PYshells))
class CartesianEntry(CoordEntry):
"""Class to hold all information about an atom, including its
coordinate specification as three Cartesians.
"""
def __init__(self, entry_number, Z, charge, mass, symbol, label, x, y, z, basis=None, shells=None):
CoordEntry.__init__(self, entry_number, Z, charge, mass, symbol, label, basis, shells)
self.x = x
self.y = y
self.z = z
def compute(self):
"""Computes the values of the coordinates (in whichever units
were inputted), returning them in a Vector
"""
if self.computed:
return self.coordinates
self.coordinates[0] = self.x.compute()
self.coordinates[1] = self.y.compute()
self.coordinates[2] = self.z.compute()
self.computed = True
return self.coordinates
def set_coordinates(self, x, y, z):
"""Given the current set of coordinates, updates the values of this
atom's coordinates and any variables that may depend on it.
"""
self.coordinates[0] = x
self.coordinates[1] = y
self.coordinates[2] = z
self.x.rset(x)
self.y.rset(y)
self.z.rset(z)
self.computed = True
def type(self):
"""The type of CoordEntry specialization."""
return 'CartesianCoord'
def print_in_input_format(self):
"""Prints the updated geometry, in the format provided by the user."""
xstr = self.x.variable_to_string(12)
ystr = self.y.variable_to_string(12)
zstr = self.z.variable_to_string(12)
return " %17s %17s %17s\n" % (xstr, ystr, zstr)
# should go to outfile
def print_in_input_format_cfour(self):
"""Prints the updated geometry, in the format provided by the user.
        For Cfour, this is no different from the regular version.
"""
xstr = self.x.variable_to_string(12)
ystr = self.y.variable_to_string(12)
zstr = self.z.variable_to_string(12)
return " %17s %17s %17s\n" % (xstr, ystr, zstr)
# should go to outfile
def invalidate(self):
"""Flags the current coordinates as being outdated."""
self.computed = False
self.x.invalidate()
self.y.invalidate()
self.z.invalidate()
def clone(self):
"""Returns new, independent CartesianEntry object"""
return copy.deepcopy(self)
def everything(self):
CoordEntry.everything(self)
print('\nCartesianEntry\n Type = %s\n x = %s\n y = %s\n z = %s\n\n' % (self.type(), self.x.variable_to_string(8), self.y.variable_to_string(8), self.z.variable_to_string(8)))
class ZMatrixEntry(CoordEntry):
"""Class to hold all information about an atom, including its
coordinate specification as any position of ZMatrix.
"""
def __init__(self, entry_number, Z, charge, mass, symbol, label, \
rto=None, rval=0, ato=None, aval=0, dto=None, dval=0, basis=None, shells=None):
"""Constructor""" # note that pos'n of basis arg changed from libmints
CoordEntry.__init__(self, entry_number, Z, charge, mass, symbol, label, basis, shells)
self.rto = rto
self.rval = rval
self.ato = ato
self.aval = aval
self.dto = dto
self.dval = dval
def invalidate(self):
"""Flags the current coordinates as being outdated"""
self.computed = False
if self.rval != 0:
self.rval.invalidate()
if self.aval != 0:
self.aval.invalidate()
if self.dval != 0:
self.dval.invalidate()
def print_in_input_format(self):
"""Prints the updated geometry, in the format provided by the user"""
text = ""
if self.rto == None and self.ato == None and self.dto == None:
# The first atom
text += "\n"
elif self.ato == None and self.dto == None:
# The second atom
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
text += " %5d %11s\n" % (now_rto, now_rval)
elif self.dto == None:
# The third atom
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
now_ato = self.ato.entry_number() + 1
now_aval = self.aval.variable_to_string(10)
text += " %5d %11s %5d %11s\n" % (now_rto, now_rval, now_ato, now_aval)
else:
# Remaining atoms
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
now_ato = self.ato.entry_number() + 1
now_aval = self.aval.variable_to_string(10)
now_dto = self.dto.entry_number() + 1
now_dval = self.dval.variable_to_string(10)
text += " %5d %11s %5d %11s %5d %11s\n" % \
(now_rto, now_rval, now_ato, now_aval, now_dto, now_dval)
return text
# outfile
def print_in_input_format_cfour(self):
"""Prints the updated geometry, in the format provided by the user"""
text = ""
if self.rto == None and self.ato == None and self.dto == None:
# The first atom
text += "\n"
elif self.ato == None and self.dto == None:
# The second atom
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
text += " %d %s\n" % (now_rto, now_rval)
elif self.dto == None:
# The third atom
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
now_ato = self.ato.entry_number() + 1
now_aval = self.aval.variable_to_string(10)
text += " %d %s %d %s\n" % (now_rto, now_rval, now_ato, now_aval)
else:
# Remaining atoms
now_rto = self.rto.entry_number() + 1
now_rval = self.rval.variable_to_string(10)
now_ato = self.ato.entry_number() + 1
now_aval = self.aval.variable_to_string(10)
now_dto = self.dto.entry_number() + 1
now_dval = self.dval.variable_to_string(10)
text += " %d %s %d %s %d %s\n" % \
(now_rto, now_rval, now_ato, now_aval, now_dto, now_dval)
return text
# outfile
def set_coordinates(self, x, y, z):
"""Given the current set of coordinates, updates the values of this
atom's coordinates, and any variables that may depend on it.
"""
self.coordinates[0] = 0.0 if math.fabs(x) < 1.0E-14 else x
self.coordinates[1] = 0.0 if math.fabs(y) < 1.0E-14 else y
self.coordinates[2] = 0.0 if math.fabs(z) < 1.0E-14 else z
if self.rto != None:
if not self.rto.is_computed():
raise ValidationError("Coordinates have been set in the wrong order")
self.rval.rset(self.r(self.coordinates, self.rto.compute()))
if self.ato != None:
if not self.ato.is_computed():
raise ValidationError("Coordinates have been set in the wrong order")
aval = self.a(self.coordinates, self.rto.compute(), self.ato.compute())
# Noise creeps in for linear molecules. Force linearity, if it is close enough.
val = aval * 180.0 / math.pi
self.aval.rset(val)
if self.dto != None:
if not self.dto.is_computed():
raise ValidationError("Coordinates have been set in the wrong order")
val = self.d(self.coordinates, self.rto.compute(), self.ato.compute(), self.dto.compute())
            # Check for NaN (val == val is False only for NaN -- the direct
            # Python translation of the C++ idiom) and skip the update if so.
if val == val:
self.dval.rset(val * 180.0 / math.pi)
self.computed = True
def type(self):
"""The type of CoordEntry specialization."""
return 'ZMatrixCoord'
def clone(self):
"""Returns new, independent ZMatrixEntry object."""
return copy.deepcopy(self)
def compute(self):
"""Compute the Cartesian coordinates in Bohr of current atom's entry."""
if self.computed:
return self.coordinates
# place first atom at the origin
if self.rto == None and self.ato == None and self.dto == None:
self.coordinates[0] = 0.0
self.coordinates[1] = 0.0
self.coordinates[2] = 0.0
# place second atom directly above the first
elif self.ato == None and self.dto == None:
self.coordinates[0] = 0.0
self.coordinates[1] = 0.0
self.coordinates[2] = self.rval.compute()
# place third atom pointing upwards
# this rTo rVal aTo aVal
# A B C
elif self.dto == None:
r = self.rval.compute()
a = self.aval.compute() * math.pi / 180.0
cosABC = math.cos(a)
sinABC = math.sin(a)
B = self.rto.compute()
C = self.ato.compute()
eCB = sub(B, C)
eCB = normalize(eCB)
eX = [0.0, 0.0, 0.0]
eY = [0.0, 0.0, 0.0]
if (math.fabs(1.0 - math.fabs(eCB[0])) < 1.0E-5):
# CB is collinear with X, start by finding Y
eY[1] = 1.0
eX = perp_unit(eY, eCB)
eY = perp_unit(eX, eCB)
else:
# CB is not collinear with X, we can safely find X first
eX[0] = 1.0
eY = perp_unit(eX, eCB)
eX = perp_unit(eY, eCB)
for xyz in range(3):
self.coordinates[xyz] = B[xyz] + r * (eY[xyz] * sinABC - eCB[xyz] * cosABC)
if math.fabs(self.coordinates[xyz]) < 1.E-14:
self.coordinates[xyz] = 0.0
# The fourth, or subsequent, atom
#
# The atom specification is
# this rTo rVal aTo aVal dTo dVal
# A B C D
# which allows us to define the vector from C->B (eCB) as the +z axis, and eDC
        # lies in the xz plane. Then eX, eY and eZ (=eBC) are the x, y, and z axes, respectively.
else:
r = self.rval.compute()
a = self.aval.compute() * math.pi / 180.0
d = self.dval.compute() * math.pi / 180.0
B = self.rto.compute()
C = self.ato.compute()
D = self.dto.compute()
eDC = sub(C, D)
eCB = sub(B, C)
eDC = normalize(eDC)
eCB = normalize(eCB)
cosABC = math.cos(a)
sinABC = math.sin(a)
cosABCD = math.cos(d)
sinABCD = math.sin(d)
eY = perp_unit(eDC, eCB)
eX = perp_unit(eY, eCB)
for xyz in range(3):
self.coordinates[xyz] = B[xyz] + r * (eX[xyz] * sinABC * cosABCD +
eY[xyz] * sinABC * sinABCD - eCB[xyz] * cosABC)
if math.fabs(self.coordinates[xyz]) < 1.E-14:
self.coordinates[xyz] = 0.0
self.computed = True
return self.coordinates
def everything(self):
CoordEntry.everything(self)
print('\nZMatrixEntry\n Type = %s\n\n' % (self.type()))
print(self.print_in_input_format())
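# A construction sketch (hypothetical atom data, comments only because this
# module uses relative imports): a CartesianEntry takes CoordValue objects for
# x/y/z and caches the computed position.
#
# h = CartesianEntry(0, 1, 1.0, 1.00782503207, 'H', 'H',
#                    NumberValue(0.0), NumberValue(0.0), NumberValue(1.4))
# h.compute()     # -> [0.0, 0.0, 1.4]; repeat calls return the cached list
# h.invalidate()  # flags the coordinates (and each CoordValue) as stale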
|
rmcgibbo/psi4public
|
psi4/driver/qcdb/libmintscoordentry.py
|
Python
|
lgpl-3.0
| 24,567
|
[
"CFOUR",
"Psi4"
] |
9475f60bbaa04966f81ef0cf6db22c1855b7382ad972ab5a297eed32da48dc4f
|
#!/usr/bin/env python
"""This script creates a graph of the modeled volume time series
for both 20km and 10km grid calculations for SeaRISE-Greenland. This figure
appears in the "Getting Started" section of the User's Manual.
"""
from numpy import *
import pylab as plt
from sys import exit
try:
import netCDF4 as netCDF
except ImportError:
import netCDF3 as netCDF
NC = netCDF.Dataset
# generate "whole" time series this way:
# $ ncrcat ts_g20km_m5000a.nc ts_g20km_0.nc -o ts_g20km_whole.nc
# $ ncrcat ts_g10km_m5000a.nc ts_g10km_0.nc -o ts_g10km_whole.nc
# run this way:
# $ python ivolboth.py -o both.png ts_g20km_whole.nc ts_g10km_whole.nc
from optparse import OptionParser
parser = OptionParser()
parser.usage = "usage: %prog [options] FILE1 FILE2"
parser.description = "A script to show ivol time series from two files."
parser.add_option("-o", "--output_file", dest="outfile",
help="output file name",default='both.png')
(options, args) = parser.parse_args()
nts = len(args)
if (nts<2) | (nts>2):
print "needs exactly two input files ... EXITING"
exit(-1)
nc20km = NC(args[0], "r")
secpera = 31556926.0
t20km = nc20km.variables["time"][:]
t20km = t20km / secpera
ivol20km = nc20km.variables["ivol"][:]
nc20km.close()
nc10km = NC(args[1], "r")
t10km = nc10km.variables["time"][:]
t10km = t10km / secpera
ivol10km = nc10km.variables["ivol"][:]
nc10km.close()
vfactor = 1.0e6 * 1.0e9
plt.figure(figsize=(12,6))
plt.plot(t20km, ivol20km / vfactor, 'b', linewidth=2.0)
plt.hold(True)
plt.plot(t10km, ivol10km / vfactor, 'r', linewidth=2.5)
plt.hold(False)
plt.legend(('20 km','10 km'), loc='lower right')
plt.xlabel("t (years)", size=16)
plt.ylabel("volume ($10^6$ km$^3$)", size=16)
plt.grid(True)
#last=-10000.0
#t20km_last = t20km[t20km >= last]
#ivol20km_last = ivol20km[t20km >= last]
#t10km_last = t10km[t10km >= last]
#ivol10km_last = ivol10km[t10km >= last]
#axesinset = axes([0.55, 0.20, 0.33, 0.25],axisbg='w')
#plot(t20km_last,ivol20km_last / vfactor, 'b', linewidth=1.0), hold(True)
#plot(t10km_last,ivol10km_last / vfactor, 'r', linewidth=2.0), hold(False)
#setp(axesinset)
#setp(axesinset,xticks=arange(95.e3,101.e3,1.e3),
# xticklabels=('95','96','97','98','99','100'))
#plt.show()
plt.savefig(options.outfile)
|
JohannesFeldmann/pism
|
util/ivolboth.py
|
Python
|
gpl-2.0
| 2,273
|
[
"NetCDF"
] |
2ff2a200db4c55aa70a371de097c58404ee164bd9a79742b9d52ae7efffd799f
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The 'grit android2grd' tool."""
import getopt
import os.path
import StringIO
from xml.dom import Node
import xml.dom.minidom
import grit.node.empty
from grit.node import io
from grit.node import message
from grit.tool import interface
from grit import grd_reader
from grit import lazy_re
from grit import tclib
from grit import util
# The name of a string in strings.xml
_STRING_NAME = lazy_re.compile(r'[a-z0-9_]+\Z')
# A string's character limit in strings.xml
_CHAR_LIMIT = lazy_re.compile(r'\[CHAR-LIMIT=(\d+)\]')
# Finds String.Format() style format specifiers such as "%-5.2f".
_FORMAT_SPECIFIER = lazy_re.compile(
'%'
'([1-9][0-9]*\$|<)?' # argument_index
'([-#+ 0,(]*)' # flags
'([0-9]+)?' # width
'(\.[0-9]+)?' # precision
'([bBhHsScCdoxXeEfgGaAtT%n])') # conversion
class Android2Grd(interface.Tool):
"""Tool for converting Android string.xml files into chrome Grd files.
Usage: grit [global options] android2grd [OPTIONS] STRINGS_XML
The Android2Grd tool will convert an Android strings.xml file (whose path is
specified by STRINGS_XML) and create a chrome style grd file containing the
relevant information.
Because grd documents are much richer than strings.xml documents we supplement
the information required by grds using OPTIONS with sensible defaults.
OPTIONS may be any of the following:
--name FILENAME Specify the base FILENAME. This should be without
any file type suffix. By default
"chrome_android_strings" will be used.
--languages LANGUAGES Comma separated list of ISO language codes (e.g.
en-US, en-GB, ru, zh-CN). These codes will be used
to determine the names of resource and translations
files that will be declared by the output grd file.
--grd-dir GRD_DIR Specify where the resultant grd file
                           (FILENAME.grd) should be output. By default this
will be the present working directory.
--header-dir HEADER_DIR Specify the location of the directory where grit
generated C++ headers (whose name will be
FILENAME.h) will be placed. By default no
directory is specified.
--rc-dir RC_DIR Specify the directory where resource files will
be located. By default this is empty.
--xml-dir XML_DIR Specify the location of the Android app's resource
directory. Internationalized strings.xml files will
                           be placed under this directory. For each language
xx a values-xx/strings.xml file will be generated.
--xtb-dir XTB_DIR Specify where the output translation files will be
located.
"""
_NAME_FLAG = 'name'
_LANGUAGES_FLAG = 'languages'
_GRD_DIR_FLAG = 'grd-dir'
_RC_DIR_FLAG = 'rc-dir'
_HEADER_DIR_FLAG = 'header-dir'
_XTB_DIR_FLAG = 'xtb-dir'
_XML_DIR_FLAG = 'xml-dir'
def __init__(self):
self.name = 'chrome_android_strings'
self.languages = []
self.grd_dir = '.'
self.rc_dir = None
self.xtb_dir = '.'
self.xml_res_dir = None
self.header_dir = ''
def ShortDescription(self):
"""Returns a short description of the Android2Grd tool.
Overridden from grit.interface.Tool
Returns:
A string containing a short description of the android2grd tool.
"""
return 'Converts Android string.xml files into Chrome grd files.'
def ParseOptions(self, args):
"""Set this objects and return all non-option arguments."""
flags = [
Android2Grd._NAME_FLAG,
Android2Grd._LANGUAGES_FLAG,
Android2Grd._GRD_DIR_FLAG,
Android2Grd._RC_DIR_FLAG,
Android2Grd._HEADER_DIR_FLAG,
Android2Grd._XTB_DIR_FLAG,
Android2Grd._XML_DIR_FLAG, ]
    (opts, args) = getopt.getopt(args, '', ['%s=' % o for o in flags])
for key, val in opts:
      # Get rid of the preceding hyphens.
k = key[2:]
if k == Android2Grd._NAME_FLAG:
self.name = val
elif k == Android2Grd._LANGUAGES_FLAG:
self.languages = val.split(',')
elif k == Android2Grd._GRD_DIR_FLAG:
self.grd_dir = val
elif k == Android2Grd._RC_DIR_FLAG:
self.rc_dir = val
elif k == Android2Grd._HEADER_DIR_FLAG:
self.header_dir = val
elif k == Android2Grd._XTB_DIR_FLAG:
self.xtb_dir = val
elif k == Android2Grd._XML_DIR_FLAG:
self.xml_res_dir = val
return args
def Run(self, opts, args):
"""Runs the Android2Grd tool.
Inherited from grit.interface.Tool.
Args:
opts: List of string arguments that should be parsed.
args: String containing the path of the strings.xml file to be converted.
"""
args = self.ParseOptions(args)
if len(args) != 1:
print ('Tool requires one argument, the path to the Android '
'strings.xml resource file to be converted.')
return 2
self.SetOptions(opts)
android_path = args[0]
# Read and parse the Android strings.xml file.
with open(android_path) as android_file:
android_dom = xml.dom.minidom.parse(android_file)
# Do the hard work -- convert the Android dom to grd file contents.
grd_dom = self.AndroidDomToGrdDom(android_dom)
grd_string = unicode(grd_dom)
# Write the grd string to a file in grd_dir.
grd_filename = self.name + '.grd'
grd_path = os.path.join(self.grd_dir, grd_filename)
with open(grd_path, 'w') as grd_file:
grd_file.write(grd_string)
def AndroidDomToGrdDom(self, android_dom):
"""Converts a strings.xml DOM into a DOM representing the contents of
a grd file.
Args:
android_dom: A xml.dom.Document containing the contents of the Android
string.xml document.
Returns:
The DOM for the grd xml document produced by converting the Android DOM.
"""
# Start with a basic skeleton for the .grd file.
root = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
<grit base_dir="." latest_public_release="0"
current_release="1" source_lang_id="en">
<release allow_pseudo="false" seq="1">
<messages fallback_to_english="true" />
</release>
<translations />
<outputs />
</grit>'''), dir='.')
messages = root.children[0].children[0]
translations = root.children[1]
outputs = root.children[2]
assert (isinstance(messages, grit.node.empty.MessagesNode) and
isinstance(translations, grit.node.empty.TranslationsNode) and
isinstance(outputs, grit.node.empty.OutputsNode))
if self.header_dir:
cpp_header = self.__CreateCppHeaderOutputNode(outputs, self.header_dir)
for lang in self.languages:
# Create an output element for each language.
if self.rc_dir:
self.__CreateRcOutputNode(outputs, lang, self.rc_dir)
if self.xml_res_dir:
self.__CreateAndroidXmlOutputNode(outputs, lang, self.xml_res_dir)
self.__CreateFileNode(translations, lang)
# Convert all the strings.xml strings into grd messages.
self.__CreateMessageNodes(messages, android_dom.documentElement)
return root
def __CreateMessageNodes(self, messages, resources):
"""Creates the <message> elements and adds them as children of <messages>.
Args:
messages: the <messages> element in the strings.xml dom.
resources: the <resources> element in the grd dom.
"""
# <string> elements contain the definition of the resource.
# The description of a <string> element is contained within the comment
    # node element immediately preceding the string element in question.
description = ''
for child in resources.childNodes:
if child.nodeType == Node.COMMENT_NODE:
# Remove leading/trailing whitespace; collapse consecutive whitespaces.
description = ' '.join(child.data.split())
elif child.nodeType == Node.ELEMENT_NODE:
if child.tagName != 'string':
print 'Warning: ignoring unknown tag <%s>' % child.tagName
elif self.IsTranslatable(child):
raw_name = child.getAttribute('name')
product = child.getAttribute('product') or None
grd_name = self.__FormatName(raw_name, product)
# Transform the <string> node contents into a tclib.Message, taking
# care to handle whitespace transformations and escaped characters,
          # converting <xliff:g> placeholders into <ph> placeholders.
msg = self.CreateTclibMessage(child)
msg_node = self.__CreateMessageNode(messages, grd_name, description,
msg)
messages.AddChild(msg_node)
# Reset the description once a message has been parsed.
description = ''
def __FormatName(self, name, product=None):
"""Formats the message name.
Names in the strings.xml files should be lowercase with underscores. In grd
files message names should be mostly uppercase with a IDS prefix. We also
will annotate names with product information (lowercase) where appropriate.
Args:
name: The message name as found in the string.xml file.
product: An optional product annotation.
Returns:
String containing the grd style name that will be used in the translation
console.
"""
if not _STRING_NAME.match(name):
print 'Error: string name contains illegal characters: %s' % name
grd_name = 'IDS_%s' % name.upper()
product_suffix = ('_product_%s' % product.lower()) if product else ''
return grd_name + product_suffix
def CreateTclibMessage(self, android_string):
"""Transforms a <string/> element from strings.xml into a tclib.Message.
Interprets whitespace, quotes, and escaped characters in the android_string
according to Android's formatting and styling rules for strings. Also
converts <xliff:g> placeholders into <ph> placeholders, e.g.:
<xliff:g id="website" example="google.com">%s</xliff:g>
becomes
<ph name="website"><ex>google.com</ex>%s</ph>
Returns:
The tclib.Message.
"""
msg = tclib.Message()
current_text = '' # Accumulated text that hasn't yet been added to msg.
nodes = android_string.childNodes
for i, node in enumerate(nodes):
# Handle text nodes.
if node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
current_text += node.data
# Handle <xliff:g> and other tags.
elif node.nodeType == Node.ELEMENT_NODE:
if node.tagName == 'xliff:g':
          assert node.hasAttribute('id'), 'missing id: ' + node.toxml()
placeholder_id = node.getAttribute('id')
placeholder_text = self.__FormatPlaceholderText(node)
placeholder_example = node.getAttribute('example')
if not placeholder_example:
print ('Info: placeholder does not contain an example: %s' %
node.toxml())
placeholder_example = placeholder_id.upper()
msg.AppendPlaceholder(tclib.Placeholder(placeholder_id,
placeholder_text, placeholder_example))
else:
print ('Warning: removing tag <%s> which must be inside a '
'placeholder: %s' % (node.tagName, node.toxml()))
msg.AppendText(self.__FormatPlaceholderText(node))
# Handle other nodes.
elif node.nodeType != Node.COMMENT_NODE:
assert False, 'Unknown node type: %s' % node.nodeType
is_last_node = (i == len(nodes) - 1)
if (current_text and
(is_last_node or nodes[i + 1].nodeType == Node.ELEMENT_NODE)):
# For messages containing just text and comments (no xml tags) Android
# strips leading and trailing whitespace. We mimic that behavior.
if not msg.GetContent() and is_last_node:
current_text = current_text.strip()
msg.AppendText(self.__FormatAndroidString(current_text))
current_text = ''
return msg
def __FormatAndroidString(self, android_string, inside_placeholder=False):
r"""Returns android_string formatted for a .grd file.
* Collapses consecutive whitespaces, except when inside double-quotes.
* Replaces \\, \n, \t, \", \' with \, newline, tab, ", '.
"""
backslash_map = {'\\' : '\\', 'n' : '\n', 't' : '\t', '"' : '"', "'" : "'"}
is_quoted_section = False # True when we're inside double quotes.
is_backslash_sequence = False # True after seeing an unescaped backslash.
prev_char = ''
output = []
for c in android_string:
if is_backslash_sequence:
# Unescape \\, \n, \t, \", and \'.
assert c in backslash_map, 'Illegal escape sequence: \\%s' % c
output.append(backslash_map[c])
is_backslash_sequence = False
elif c == '\\':
is_backslash_sequence = True
elif c.isspace() and not is_quoted_section:
# Turn whitespace into ' ' and collapse consecutive whitespaces.
if not prev_char.isspace():
output.append(' ')
elif c == '"':
is_quoted_section = not is_quoted_section
else:
output.append(c)
prev_char = c
output = ''.join(output)
if is_quoted_section:
print 'Warning: unbalanced quotes in string: %s' % android_string
if is_backslash_sequence:
print 'Warning: trailing backslash in string: %s' % android_string
# Check for format specifiers outside of placeholder tags.
if not inside_placeholder:
format_specifier = _FORMAT_SPECIFIER.search(output)
if format_specifier:
print ('Warning: format specifiers are not inside a placeholder '
'<xliff:g/> tag: %s' % output)
return output
def __FormatPlaceholderText(self, placeholder_node):
"""Returns the text inside of an <xliff:g> placeholder node."""
text = []
for childNode in placeholder_node.childNodes:
if childNode.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
text.append(childNode.data)
elif childNode.nodeType != Node.COMMENT_NODE:
assert False, 'Unknown node type in ' + placeholder_node.toxml()
return self.__FormatAndroidString(''.join(text), inside_placeholder=True)
def __CreateMessageNode(self, messages_node, grd_name, description, msg):
"""Creates and initializes a <message> element.
Message elements correspond to Android <string> elements in that they
declare a string resource along with a programmatic id.
"""
if not description:
print 'Warning: no description for %s' % grd_name
# Check that we actually fit within the character limit we've specified.
match = _CHAR_LIMIT.search(description)
if match:
char_limit = int(match.group(1))
msg_content = msg.GetRealContent()
if len(msg_content) > char_limit:
print ('Warning: char-limit for %s is %d, but length is %d: %s' %
(grd_name, char_limit, len(msg_content), msg_content))
return message.MessageNode.Construct(parent=messages_node,
name=grd_name,
message=msg,
desc=description,
translateable=True)
def __CreateFileNode(self, translations_node, lang):
"""Creates and initializes the <file> elements.
File elements provide information on the location of translation files
(xtbs)
"""
xtb_file = self.name + '_' + lang + '.xtb'
fnode = io.FileNode()
fnode.StartParsing(u'file', translations_node)
fnode.HandleAttribute('path', xtb_file)
fnode.HandleAttribute('lang', lang)
fnode.EndParsing()
translations_node.AddChild(fnode)
return fnode
def __CreateCppHeaderOutputNode(self, outputs_node, header_dir):
"""Creates the <output> element corresponding to the generated c header."""
header_file_name = os.path.join(header_dir, self.name + '.h')
header_node = io.OutputNode()
header_node.StartParsing(u'output', outputs_node)
header_node.HandleAttribute('filename', header_file_name)
header_node.HandleAttribute('type', 'rc_header')
emit_node = io.EmitNode()
emit_node.StartParsing(u'emit', header_node)
emit_node.HandleAttribute('emit_type', 'prepend')
emit_node.EndParsing()
header_node.AddChild(emit_node)
header_node.EndParsing()
outputs_node.AddChild(header_node)
return header_node
def __CreateRcOutputNode(self, outputs_node, lang, rc_dir):
"""Creates the <output> element corresponding to various rc file output."""
rc_file_name = self.name + '_' + lang + ".rc"
rc_path = os.path.join(rc_dir, rc_file_name)
node = io.OutputNode()
node.StartParsing(u'output', outputs_node)
node.HandleAttribute('filename', rc_path)
node.HandleAttribute('lang', lang)
node.HandleAttribute('type', 'rc_all')
node.EndParsing()
outputs_node.AddChild(node)
return node
def __CreateAndroidXmlOutputNode(self, outputs_node, locale, xml_res_dir):
"""Creates the <output> element corresponding to various rc file output."""
# Need to check to see if the locale has a region, e.g. the GB in en-GB.
# When a locale has a region Android expects the region to be prefixed
# with an 'r'. For example for en-GB Android expects a values-en-rGB
# directory. Also, Android expects nb, tl, in, iw, ji as the language
# codes for Norwegian, Tagalog/Filipino, Indonesian, Hebrew, and Yiddish:
# http://developer.android.com/reference/java/util/Locale.html
if locale == 'es-419':
android_locale = 'es-rUS'
else:
android_lang, dash, region = locale.partition('-')
lang_map = {'no': 'nb', 'fil': 'tl', 'id': 'in', 'he': 'iw', 'yi': 'ji'}
android_lang = lang_map.get(android_lang, android_lang)
android_locale = android_lang + ('-r' + region if region else '')
xml_file_name = "strings.xml"
xml_locale_path = os.path.join(xml_res_dir, 'values-%s' % android_locale)
    xml_path = os.path.join(xml_locale_path, xml_file_name)
node = io.OutputNode()
node.StartParsing(u'output', outputs_node)
node.HandleAttribute('filename', xml_path)
node.HandleAttribute('lang', locale)
node.HandleAttribute('type', 'android')
node.EndParsing()
outputs_node.AddChild(node)
return node
def IsTranslatable(self, android_string):
"""Determines if a <string> element is a candidate for translation.
A <string> element is by default translatable unless otherwise marked.
"""
if android_string.hasAttribute('translatable'):
value = android_string.getAttribute('translatable').lower()
if value not in ('true', 'false'):
print 'Warning: translatable attribute has invalid value: %s' % value
return value == 'true'
else:
return True
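# A conversion sketch for a single <string> element (hypothetical XML; comments
# only, since instantiating the tool needs the full grit package importable):
#
# import xml.dom.minidom
# dom = xml.dom.minidom.parseString(
#     '<string name="greeting">Hello '
#     '<xliff:g id="name" example="Ada">%1$s</xliff:g></string>')
# msg = Android2Grd().CreateTclibMessage(dom.documentElement)
# msg.GetRealContent()  # -> 'Hello %1$s', with "name" registered as a <ph>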
|
leighpauls/k2cro4
|
tools/grit/grit/tool/android2grd.py
|
Python
|
bsd-3-clause
| 19,349
|
[
"xTB"
] |
86943706da2f7459066a73597b001f7ed600b55db09c35614cf57b8e56e39c9d
|
from __future__ import unicode_literals
import threading
from django.test.simple import DjangoTestSuiteRunner
from django.core.servers.basehttp import run, get_internal_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
from splinter.driver.webdriver.firefox import WebDriver
from mock import patch
BROWSER = None
class TestSuiteRunner(DjangoTestSuiteRunner):
def setup_test_environment(self, **kwargs):
super(TestSuiteRunner, self).setup_test_environment(**kwargs)
handler = get_internal_wsgi_application()
handler = StaticFilesHandler(handler)
def start_server():
with patch('django.core.servers.basehttp.WSGIRequestHandler.log_message'):
run('0.0.0.0', 65432, handler, ipv6=False, threading=False)
thread = threading.Thread(target=start_server)
thread.daemon = True
thread.start()
global BROWSER
BROWSER = SingleVisitFirefoxDriver()
def teardown_test_environment(self, **kwargs):
BROWSER.quit()
        super(TestSuiteRunner, self).teardown_test_environment(**kwargs)
class SingleVisitFirefoxDriver(WebDriver):
def visit(self, url):
self.driver.get(url)
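# Wiring sketch (hypothetical module path, comments only): point Django at this
# runner, then drive the live server through the shared browser.
#
# settings.py:
#   TEST_RUNNER = 'splinter_demo.test_runner.TestSuiteRunner'
# in a test:
#   from splinter_demo import test_runner
#   test_runner.BROWSER.visit('http://localhost:65432/')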
|
ErinCall/splinter_demo
|
django/splinter_demo/test_runner.py
|
Python
|
mit
| 1,225
|
[
"VisIt"
] |
4eb476d10621a4ed8957499ce9c8edbca339d6b3e30a66035a76f8530bd1501f
|
#!/usr/bin/env python
import vtk
def main():
xyzFile, qFile = get_program_parameters()
colors = vtk.vtkNamedColors()
# Create the RenderWindow, Renderer and Interactor.
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Create the pipeline.
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(xyzFile)
pl3d.SetQFileName(qFile)
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
iso = vtk.vtkContourFilter()
iso.SetInputData(pl3d.GetOutput().GetBlock(0))
iso.SetValue(0, 0.38)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(iso.GetOutputPort())
normals.SetFeatureAngle(45)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(normals.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(colors.GetColor3d("bisque"))
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Add the actors to the renderer, set the background and size.
#
ren1.AddActor(outlineActor)
ren1.AddActor(isoActor)
ren1.SetBackground(colors.GetColor3d("SlateGray"))
renWin.SetSize(640, 480)
ren1.GetActiveCamera().SetFocalPoint(9.71821, 0.458166, 29.3999)
ren1.GetActiveCamera().SetPosition(2.7439, -37.3196, 38.7167)
ren1.GetActiveCamera().SetViewUp(-0.16123, 0.264271, 0.950876)
ren1.ResetCameraClippingRange()
# Render the image.
#
renWin.Render()
iren.Start()
def get_program_parameters():
import argparse
description = 'Marching cubes surface of flow density.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename1', help='combxyz.bin.')
parser.add_argument('filename2', help='combq.bin.')
args = parser.parse_args()
return args.filename1, args.filename2
if __name__ == '__main__':
main()
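# Example run (file names taken from the argparse help above):
#   python CombustorIsosurface.py combxyz.bin combq.bin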
|
lorensen/VTKExamples
|
src/Python/VisualizationAlgorithms/CombustorIsosurface.py
|
Python
|
apache-2.0
| 2,448
|
[
"VTK"
] |
0916f206f66be5fbad390089fdbc1283bc9c6026a2b4866e4e8e0e26d31edd6c
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection, Optional
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
import gpflow
from gpflow.base import AnyNDArray, MeanAndVariance
from gpflow.conditionals import conditional, uncertain_conditional
from gpflow.config import default_float
from gpflow.mean_functions import Constant, Linear, MeanFunction, Zero
from gpflow.quadrature import mvnquad
from gpflow.utilities import training_loop
rng = np.random.RandomState(1)
# ------------------------------------------
# Helpers
# ------------------------------------------
class MomentMatchingSVGP(gpflow.models.SVGP):
def uncertain_predict_f_moment_matching(
self, Xmu: tf.Tensor, Xcov: tf.Tensor
) -> MeanAndVariance:
return uncertain_conditional(
Xmu,
Xcov,
self.inducing_variable,
self.kernel,
self.q_mu,
self.q_sqrt,
mean_function=self.mean_function,
white=self.whiten,
full_output_cov=self.full_output_cov,
)
def uncertain_predict_f_monte_carlo(
self, Xmu: tf.Tensor, Xchol: tf.Tensor, mc_iter: int = int(1e6)
) -> MeanAndVariance:
D_in = Xchol.shape[0]
X_samples = Xmu + np.reshape(
Xchol[None, :, :] @ rng.randn(mc_iter, D_in)[:, :, None], [mc_iter, D_in]
)
F_mu, F_var = self.predict_f(X_samples)
F_samples = (F_mu + rng.randn(*F_var.shape) * (F_var ** 0.5)).numpy()
mean = np.mean(F_samples, axis=0)
covar = np.cov(F_samples.T)
return mean, covar
def gen_L(n: int, *shape: int) -> AnyNDArray:
return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(D_out: int, *shape: int) -> tf.Tensor:
return tf.convert_to_tensor(
np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)]),
dtype=default_float(),
)
def mean_function_factory(
mean_function_name: Optional[str], D_in: int, D_out: int
) -> Optional[MeanFunction]:
if mean_function_name == "Zero":
return Zero(output_dim=D_out)
elif mean_function_name == "Constant":
return Constant(c=rng.rand(D_out))
elif mean_function_name == "Linear":
return Linear(A=rng.rand(D_in, D_out), b=rng.rand(D_out))
else:
return None
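# Factory sketch (comments only, to keep this test module import-clean):
# mean_function_factory("Linear", D_in=2, D_out=3) builds a gpflow Linear mean
# with random A of shape (2, 3) and b of shape (3,); an unrecognized name
# returns None, which test_quadrature below maps to a zero mean via
# effective_mean.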
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
N = 7
N_new = 2
D_out = 3
D_in = 1
X = np.linspace(-5, 5, N)[:, None] + rng.randn(N, 1)
Y = np.hstack([np.sin(X), np.cos(X), X ** 2])
Xnew_mu = rng.randn(N_new, 1)
Xnew_covar = np.zeros((N_new, 1, 1))
data = (X, Y)
class DataMC1(Data):
Y = np.hstack([np.sin(Data.X), np.sin(Data.X) * 2, Data.X ** 2])
data = (Data.X, Y)
class DataMC2(Data):
N = 7
N_new = 5
D_out = 4
D_in = 2
X = rng.randn(N, D_in)
Y = np.hstack([np.sin(X), np.sin(X)])
Xnew_mu = rng.randn(N_new, D_in)
L = gen_L(N_new, D_in, D_in)
Xnew_covar = np.array([l @ l.T for l in L])
data = (X, Y)
class DataQuad:
num_data = 10
num_ind = 10
D_in = 2
D_out = 3
H = 150
Xmu = tf.convert_to_tensor(rng.randn(num_data, D_in), dtype=default_float())
L = gen_L(num_data, D_in, D_in)
Xvar = tf.convert_to_tensor(np.array([l @ l.T for l in L]), dtype=default_float())
Z = rng.randn(num_ind, D_in)
q_mu = tf.convert_to_tensor(rng.randn(num_ind, D_out), dtype=default_float())
q_sqrt = gen_q_sqrt(D_out, num_ind, num_ind)
MEANS: Collection[Optional[str]] = ["Constant", "Linear", "Zero", None]
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_no_uncertainty(white: bool, mean: Optional[str]) -> None:
mean_function = mean_function_factory(mean, Data.D_in, Data.D_out)
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=Data.D_out,
mean_function=mean_function,
inducing_variable=Data.X.copy(),
whiten=white,
)
model.full_output_cov = False
training_loop(
model.training_loss_closure(Data.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=100,
compile=True,
)
mean1, var1 = model.predict_f(Data.Xnew_mu)
mean2, var2 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [Data.Xnew_mu, Data.Xnew_covar])
)
assert_allclose(mean1, mean2)
for n in range(Data.N_new):
assert_allclose(var1[n, :], var2[n, ...])
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_monte_carlo_1_din(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
mean_function = mean_function_factory(mean, DataMC1.D_in, DataMC1.D_out)
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=DataMC1.D_out,
mean_function=mean_function,
inducing_variable=DataMC1.X.copy(),
whiten=white,
)
model.full_output_cov = True
training_loop(
model.training_loss_closure(DataMC1.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=200,
compile=True,
)
mean1, var1 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [DataMC1.Xnew_mu, DataMC1.Xnew_covar])
)
for n in range(DataMC1.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC1.Xnew_mu[n, ...], DataMC1.Xnew_covar[n, ...] ** 0.5
)
assert_allclose(mean1[n, ...], mean2, atol=1e-3, rtol=1e-1)
assert_allclose(var1[n, ...], var2, atol=1e-2, rtol=1e-1)
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_monte_carlo_2_din(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
mean_function = mean_function_factory(mean, DataMC2.D_in, DataMC2.D_out)
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=DataMC2.D_out,
mean_function=mean_function,
inducing_variable=DataMC2.X.copy(),
whiten=white,
)
model.full_output_cov = True
training_loop(
model.training_loss_closure(DataMC2.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=100,
compile=True,
)
mean1, var1 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [DataMC2.Xnew_mu, DataMC2.Xnew_covar])
)
for n in range(DataMC2.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC2.Xnew_mu[n, ...], DataMC2.L[n, ...]
)
assert_allclose(mean1[n, ...], mean2, atol=1e-2)
assert_allclose(var1[n, ...], var2, atol=1e-2)
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_quadrature(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential()
inducing_variable = gpflow.inducing_variables.InducingPoints(DataQuad.Z)
mean_function = mean_function_factory(mean, DataQuad.D_in, DataQuad.D_out)
effective_mean = mean_function or (lambda X: 0.0)
def conditional_fn(X: tf.Tensor) -> MeanAndVariance:
return conditional(
X,
inducing_variable,
kernel,
DataQuad.q_mu,
q_sqrt=DataQuad.q_sqrt,
white=white,
)
def mean_fn(X: tf.Tensor) -> tf.Tensor:
return conditional_fn(X)[0] + effective_mean(X)
def var_fn(X: tf.Tensor) -> tf.Tensor:
return conditional_fn(X)[1]
quad_args = (
DataQuad.Xmu,
DataQuad.Xvar,
DataQuad.H,
DataQuad.D_in,
(DataQuad.D_out,),
)
mean_quad = mvnquad(mean_fn, *quad_args)
var_quad = mvnquad(var_fn, *quad_args)
def mean_sq_fn(X: tf.Tensor) -> tf.Tensor:
return mean_fn(X) ** 2
mean_sq_quad = mvnquad(mean_sq_fn, *quad_args)
var_quad = var_quad + (mean_sq_quad - mean_quad ** 2)
mean_analytic, var_analytic = uncertain_conditional(
DataQuad.Xmu,
DataQuad.Xvar,
inducing_variable,
kernel,
DataQuad.q_mu,
DataQuad.q_sqrt,
mean_function=mean_function,
full_output_cov=False,
white=white,
)
assert_allclose(mean_quad, mean_analytic, rtol=1e-6)
assert_allclose(var_quad, var_analytic, rtol=1e-6)
|
GPflow/GPflow
|
tests/gpflow/conditionals/test_uncertain_conditional.py
|
Python
|
apache-2.0
| 9,395
|
[
"Gaussian"
] |
f3951c96eea69548e9237565d8addd80be42fa1461faffc1d246c4a411bf66f3
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Wizard for optical pre-sale"""
import gtk
import string
from kiwi.ui.objectlist import Column
from kiwi.datatypes import ValidationError
from stoqlib.domain.person import Person
from stoqlib.domain.workorder import WorkOrder, WorkOrderItem
from stoqlib.gui.dialogs.batchselectiondialog import BatchDecreaseSelectionDialog
from stoqlib.gui.utils.printing import print_report
from stoqlib.gui.wizards.personwizard import (PersonRoleWizard,
PersonRoleTypeStep,
RoleEditorStep)
from stoqlib.gui.wizards.workorderquotewizard import (WorkOrderQuoteWizard,
WorkOrderQuoteStartStep,
WorkOrderQuoteWorkOrderStep,
WorkOrderQuoteItemStep)
from stoqlib.lib.message import yesno
from stoqlib.lib.translation import stoqlib_gettext as _
from .opticaldomain import OpticalWorkOrder, OpticalMedic, OpticalProduct
from .opticalslave import WorkOrderOpticalSlave
from .opticalreport import OpticalWorkOrderReceiptReport
class OpticalStartSaleQuoteStep(WorkOrderQuoteStartStep):
"""First step of the pre-sale for optical stores.
This is just like the first step of the regular pre-sale, but it has a
different next step.
"""
#
# WorkOrderQuoteStartStep
#
def next_step(self):
self.wizard.workorders = []
return OpticalWorkOrderStep(
self.store, self.wizard, self, self.model)
class OpticalWorkOrderStep(WorkOrderQuoteWorkOrderStep):
"""Second step of the pre-sale for optical stores.
In this step, the sales person will create the workorders required for this
sale (one for each spectacles)
"""
#
# WorkOrderQuoteWorkOrderStep
#
def __init__(self, store, wizard, previous, model):
self._current_work_order = 0
WorkOrderQuoteWorkOrderStep.__init__(self, store, wizard, previous, model)
def next_step(self):
return OpticalItemStep(self.wizard, self, self.store, self.model)
def get_work_order_slave(self, work_order):
desc = unicode(string.ascii_uppercase[self._current_work_order])
self._current_work_order += 1
return WorkOrderOpticalSlave(self.store, work_order,
show_finish_date=True,
description=desc)
class OpticalItemStep(WorkOrderQuoteItemStep):
"""Third step of the optical pre-sale.
Besides using the <stoqlib.gui.wizards.abstractwizard.SellableItemSlave> to
add items to the sale, this step has a widget on the top to let the user
choose on what work order he is adding the items.
If the sale has more than 4 work orders, then the widget will be a combo
box. Otherwise, there will be up to 3 radio buttons for the user to choose
the work order.
"""
batch_selection_dialog = BatchDecreaseSelectionDialog
allow_no_batch = True
#
# WorkOrderQuoteItemStep
#
def get_order_item(self, sellable, price, quantity, batch=None):
sale_item = super(OpticalItemStep, self).get_order_item(
sellable, price, quantity, batch=batch)
self._setup_patient(sale_item)
wo_item = WorkOrderItem.get_from_sale_item(self.store, sale_item)
# Now we must remove the products added to the workorders from the
# stock and we can associate the category selected to the workorders
storable = sale_item.sellable.product_storable
if not storable:
return sale_item
optical_product = OpticalProduct.get_from_product(storable.product)
if optical_product:
auto_reserve = optical_product.auto_reserve
else:
auto_reserve = True
if sale_item.batch is not None:
balance = sale_item.batch.get_balance_for_branch(
sale_item.sale.branch)
else:
balance = storable.get_balance_for_branch(
sale_item.sale.branch)
if auto_reserve:
quantity_to_reserve = min(balance, sale_item.quantity)
if quantity_to_reserve:
sale_item.reserve(quantity_to_reserve)
wo_item.quantity_decreased = sale_item.quantity_decreased
return sale_item
def get_saved_items(self):
for item in super(OpticalItemStep, self).get_saved_items():
self._setup_patient(item)
yield item
def get_extra_columns(self):
return [Column('_patient', title=_(u'Owner'), data_type=str)]
def setup_work_order(self, work_order):
optical_wo = self.store.find(
OpticalWorkOrder, work_order=work_order).one()
work_order.description = _('Work order for %s') % optical_wo.patient
work_order.estimated_start = work_order.estimated_finish
#
# Private
#
def _setup_patient(self, sale_item):
wo_item = WorkOrderItem.get_from_sale_item(self.store, sale_item)
optical_wo = self.store.find(
OpticalWorkOrder, work_order=wo_item.order).one()
sale_item._patient = optical_wo.patient
class OpticalSaleQuoteWizard(WorkOrderQuoteWizard):
"""Wizard for optical pre-sales.
This is similar to the regular pre-sale, but has an additional step to
create some workorders, and the item step is changed a little bit, to allow
the sales person to select in what work order the item should be added to.
"""
#
# WorkOrderQuoteWizard
#
def get_first_step(self, store, model):
return OpticalStartSaleQuoteStep(store, self, model)
def print_quote_details(self, model, payments_created=False):
msg = _('Would you like to print the quote details now?')
# We can only print the details if the quote was confirmed.
if yesno(msg, gtk.RESPONSE_YES,
_("Print quote details"), _("Don't print")):
orders = WorkOrder.find_by_sale(self.model.store, self.model)
print_report(OpticalWorkOrderReceiptReport, list(orders))
class MedicRoleTypeStep(PersonRoleTypeStep):
def _setup_widgets(self):
self.document_label.set_text('CRM')
label = _('What kind of %s are you adding?')
role_name = self.wizard.get_role_name().lower()
self.person_role_label.set_text(label % role_name)
self.person_role_label.set_size('large')
self.person_role_label.set_bold(True)
self.register_validate_function(self.wizard.refresh_next)
def next_step(self):
from stoqlib.domain.person import Individual, Company
if self.individual_check.get_active():
role_type = Person.ROLE_INDIVIDUAL
else:
role_type = Person.ROLE_COMPANY
if self.person_document.is_empty():
return RoleEditorStep(self.wizard, self.store, self, role_type)
person = OpticalMedic.get_person_by_crm(self.store, self.model.person_document)
if person:
role = person.has_individual_or_company_facets()
if isinstance(role, Individual):
role_type = Person.ROLE_INDIVIDUAL
elif isinstance(role, Company):
role_type = Person.ROLE_COMPANY
return RoleEditorStep(self.wizard, self.store, self, role_type, person,
document=self.model.person_document)
def on_person_document__validate(self, entry, value):
if value.startswith('0'):
return ValidationError(_("CRM can't start with zeros"))
def on_individual_check__toggled(self, *args):
# Overriding the method.
# CPF/CNPJ fields are no longer used to search an existing medic.
pass
class MedicRoleWizard(PersonRoleWizard):
def get_first_step(self, store):
return MedicRoleTypeStep(self, store)
|
tiagocardosos/stoq
|
plugins/optical/opticalwizard.py
|
Python
|
gpl-2.0
| 8,853
|
[
"VisIt"
] |
11183b3563fa15717914e76d88402ed1545bd49be666cb7a54d1689fce88e840
|
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils
import urllib2
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.utils.unicode import to_unicode
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
validate_certs = kwargs.get('validate_certs', True)
ret = []
for term in terms:
try:
response = open_url(term, validate_certs=validate_certs)
            except urllib2.HTTPError as e:
                # HTTPError subclasses URLError, so it must be caught first
                utils.warning("Received HTTP error for %s : %s" % (term, str(e)))
                continue
            except urllib2.URLError as e:
                utils.warning("Failed lookup url for %s : %s" % (term, str(e)))
                continue
except SSLValidationError as e:
utils.warning("Error validating the server's certificate for %s: %s" % (term, str(e)))
continue
except ConnectionError as e:
utils.warning("Error connecting to %s: %s" % (term, str(e)))
continue
for line in response.read().splitlines():
ret.append(to_unicode(line))
return ret
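# Usage sketch (illustrative, not part of this file): in a playbook the
# lookup can be invoked by name, e.g.
#
#     - debug: msg="{{ item }}"
#       with_url: http://example.com/hosts.txt
#
# Each line of the fetched document becomes one item in the loop.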
|
mattbernst/polyhartree
|
support/ansible/runner/lookup_plugins/url.py
|
Python
|
gpl-3.0
| 2,108
|
[
"Brian"
] |
13948149c0091e0fb8bce8392f73bebd00582c27c8122ad156fe748acd7dac7c
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
from copy import deepcopy
from cStringIO import StringIO
import pytz
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys.edx.keys import CourseKey
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, assign_role
from django_comment_common.utils import seed_permissions_roles
from edxmako.shortcuts import render_to_response
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
bulk_delete_ccx_override_fields,
clear_ccx_field_info_from_ccx_map,
get_override_for_ccx,
override_field_for_ccx
)
from lms.djangoapps.ccx.utils import (
add_master_course_staff_to_ccx,
assign_staff_role_to_ccx,
ccx_course,
ccx_students_enrolling_center,
get_ccx_by_ccx_id,
get_ccx_creation_dict,
get_ccx_for_coach,
get_date,
parse_date,
)
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from lms.djangoapps.instructor.enrollment import enroll_email, get_email_params
from lms.djangoapps.instructor.views.api import _split_input_list
from lms.djangoapps.instructor.views.gradebook_api import get_grade_book_page
from student.models import CourseEnrollment
from student.roles import CourseCcxCoachRole
from xmodule.modulestore.django import SignalHandler
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
    View decorator which enforces that the user has the CCX coach role on the
    given course and translates the course_id from the Django route into a
    course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
try:
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
except CustomCourseForEdX.DoesNotExist:
raise Http404
if ccx:
course_key = ccx.course_id
course = get_course_by_id(course_key, depth=None)
if not course.enable_ccx:
raise Http404
else:
is_staff = has_access(request.user, 'staff', course)
is_instructor = has_access(request.user, 'instructor', course)
if is_staff or is_instructor:
                # Staff and instructors can view the CCX coach dashboard.
return view(request, course, ccx)
else:
# if there is a ccx, we must validate that it is the ccx for this coach
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(_('You must be a CCX Coach to access this view.'))
elif ccx is not None:
coach_ccx = get_ccx_by_ccx_id(course, request.user, ccx.id)
if coach_ccx is None:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
    # Right now, we can only have one CCX per user and course,
    # so if no CCX is passed in we can safely redirect to it.
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, unicode(ccx.id))}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
context.update(get_ccx_creation_dict(course))
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# At this point we are done with verification that current user is ccx coach.
assign_staff_role_to_ccx(ccx_locator, request.user, course.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
with ccx_course(ccx_locator) as course:
context['course'] = course
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
if hasattr(course, 'ccx_connector') and course.ccx_connector:
        # If a CCX connector URL is set in the course settings, inform the
        # user that a CCX can only be created through that connector URL.
context = get_ccx_creation_dict(course)
messages.error(request, context['use_ccx_con_error_message'])
return render_to_response('ccx/coach_dashboard.html', context)
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Enforce a static limit for the maximum amount of students that can be enrolled
override_field_for_ccx(ccx, course, 'max_student_enrollments_allowed', settings.CCX_MAX_STUDENTS_ALLOWED)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# Create forum roles
seed_permissions_roles(ccx_id)
# Assign administrator forum role to CCX coach
assign_role(ccx_id, request.user, FORUM_ROLE_ADMINISTRATOR)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
# Enroll the coach in the course
email_params = get_email_params(course, auto_enroll=True, course_key=ccx_id, display_name=ccx.display_name)
enroll_email(
course_id=ccx_id,
student_email=request.user.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
assign_staff_role_to_ccx(ccx_id, request.user, course.id)
add_master_course_staff_to_ccx(course, ccx_id, ccx.display_name)
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
            # Only subsections (aka sequentials) and units (aka verticals) have due dates.
            due = None
            if 'due' in unit:  # check that the key 'due' exists in the dict `unit`
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
else:
# In case of section aka chapter we do not have due date.
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
# For a vertical, override start and due dates of all its problems.
if unit.get('category', None) == u'vertical':
for component in block.get_children():
# override start and due date of problem (Copy dates of vertical into problems)
if start:
override_field_for_ccx(ccx, component, 'start', start)
if due:
override_field_for_ccx(ccx, component, 'due', due)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
    earliest, ccx_ids_to_delete = override_fields(
        course, json.loads(request.body), graded, ccx_ids_to_delete=[])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section.get('min_count', 0):
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
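# Worked example (illustrative values) of the grading-policy adjustment in
# save_ccx above: if the master course policy contains
# {'type': 'Homework', 'min_count': 5} but the saved CCX schedule leaves only
# 3 graded homework units visible, 'min_count' is lowered to 3 so the grader
# does not expect assignments the CCX does not contain.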
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
# using CCX object as sender here.
responses = SignalHandler.course_published.send(
sender=ccx,
course_key=CCXLocator.from_course_locator(course.id, unicode(ccx.id))
)
for rec, response in responses:
log.info('Signal fired when course is published. Receiver: %s. Response: %s', rec, response)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, unicode(ccx.id))}
)
return redirect(url)
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
        Recursive generator function which yields CCX schedule nodes.
        Dates are converted to strings so they are ready for the JS date
        widgets, which use text inputs.
        Only student-visible nodes are visited; children of hidden nodes
        are skipped as well.
        Dates:
        Only a start date applies to a section; if the CCX coach did not
        override it, it is taken from the master course.
        Both start and due dates apply to a subsection (aka sequential); any
        date the coach did not override is taken from the corresponding
        subsection in the master course.
        A unit inherits its start and due dates from its subsection unless
        the coach overrode them.
"""
for child in node.get_children():
# in case the children are visible to staff only, skip them
if child.visible_to_staff_only:
continue
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
start = get_date(ccx, child, 'start')
if depth > 1:
                # Subsections have both start and due dates; units inherit dates from their subsections
if depth == 2:
due = get_date(ccx, child, 'due')
elif depth == 3:
                    # Get the subsection's start and due dates in case the unit has not overridden them.
due = get_date(ccx, child, 'due', node)
start = get_date(ccx, child, 'start', node)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
else:
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
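# Shape of the schedule produced by get_ccx_schedule (and consumed by
# save_ccx), sketched with hypothetical values:
#
#     ({'location': 'block-v1:Org+Course+Run+type@chapter+block@week1',
#       'display_name': 'Week 1',
#       'category': 'chapter',
#       'start': '2020-01-01 00:00',
#       'hidden': False,
#       'children': (
#           {'location': '...', 'display_name': 'Homework 1',
#            'category': 'sequential',
#            'start': '2020-01-01 00:00', 'due': '2020-01-08 00:00',
#            'hidden': False, 'children': (...)},
#       )},)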
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
email_students = 'email-students' in request.POST
course_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params, ccx.coach)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""
Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
email_students = 'email-students' in request.POST
identifiers = [student_id]
course_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
errors = ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params, ccx.coach)
for error_message in errors:
messages.error(request, error_message)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
with ccx_course(ccx_key) as course:
student_info, page = get_grade_book_page(request, course, course_key=ccx_key)
return render_to_response('courseware/gradebook.html', {
'page': page,
'page_url': reverse('ccx_gradebook', kwargs={'course_id': ccx_key}),
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
with ccx_course(ccx_key) as course:
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = CourseGradeFactory().iter(enrolled_students, course)
header = None
rows = []
for student, course_grade, __ in grades:
if course_grade:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in course_grade.summary[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in course_grade.summary[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
course_grade.percent] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
response = HttpResponse(buf.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment'
return response
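# Resulting CSV layout (illustrative): one header row, then one row per
# successfully graded student, e.g.
#
#     id,email,username,grade,HW 01,HW 02,Final
#     42,student@example.com,student42,0.83,1.0,0.5,0.9
#
# The section labels come from course_grade.summary['section_breakdown'].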
|
lduarte1991/edx-platform
|
lms/djangoapps/ccx/views.py
|
Python
|
agpl-3.0
| 22,386
|
[
"VisIt"
] |
fd07a7cf76af3c17b6a13e82b557f5f5b247588ad85f9d0bd74d689d22d56b28
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**************************************
pathintegral - nuclear quantum effects
**************************************
- method to automatically run the system with nuclear quantum effects using the Feynman path integral
!!WARNING: THIS IS STILL AN EXPERIMENTAL FEATURE!!
This method creates, based on the supplied topology of the system, a path-integral representation with P beads.
The path-integral system is a fully classical analog, which has to be run at an effective temperature P*T.
The method needs the following parameters:
* allParticles
particles of the system
* props
particle properties
* types
types, e.g. read from the gromacs parser
* system
* exclusions
non-bonded exclusions
* integrator
* langevin
langevin integrator
* rcut
the cutoff used for the rings non-bonded interactions
* P
the Trotter Number (number of imaginary time slices)
* polymerInitR
polymer radius for setting up ring in 2d plane
* hbar
hbar in gromacs units [kJ/mol ps]
* disableVVL
disable the Virtual Verlet List (slow but safe). If False, the neighbour search is based on the VirtualParticles extension, which contains
the rings. This speeds up the neighbour search significantly.
"""
import copy
import math
import espressopp
from espressopp import Real3D, Int3D
def createPathintegralSystem(allParticles,
props,
types,
system,
exclusions,
integrator,
langevin,
rcut,
P,
polymerInitR=0.01,
hbar=0.063507807,
disableVVL=False
):
# Turns the classical system into a Pathintegral system with P beads
numtypes=max(types)+1
num_cla_part=len(allParticles)
## make a dictionary for properties
##(TODO: better to use esp++ particle ?)
propDict={}
for p in props: propDict.update({p:len(propDict)})
piParticles=[]
ringids={} #dict with key: classical particle id, value vector of ids in the ring polymer
vptuples=[]
if not disableVVL:
vcl=espressopp.CellList()
ftpl = espressopp.FixedTupleList(system.storage)
#vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
# create a cell list which will store the virtual particles after domain decomposition
vvl.setCellList(vcl)
    ## some data structures that will be useful later
    ## ringids has all imaginary-time beads belonging to a classical bead pid
    ## allParticlesById is used to access particle properties by pid
allParticlesById={}
for p in allParticles:
pid=p[propDict['id']]
ringids.update({pid:[]})
allParticlesById.update({pid:p})
for i in xrange(1,P):
for p in allParticles:
pid=p[propDict['id']]
newparticle=copy.deepcopy(p)
            # set types according to imaginary-time index
newparticle[propDict['type']]=newparticle[propDict['type']]+numtypes*i
# set positions
newpos=newparticle[propDict['pos']]
newpos[0]=newpos[0]+polymerInitR*math.cos(i*2*math.pi/P)-polymerInitR
newpos[1]=newpos[1]+polymerInitR*math.sin(i*2*math.pi/P)
newid=len(allParticles)+len(piParticles)+1
newparticle[propDict['id']]=newid
piParticles.append(newparticle)
ringids[pid].append(newid)
if not disableVVL:
iVerletLists={}
for i in xrange(1,P+1):
iVerletLists.update({i:espressopp.VerletList(system, 0, rebuild=False)})
iVerletLists[i].disconnect()
## map types to sub-verlet lists using the VirtualVerletList classical
## classical types are in types
## type at imaginary time i=t+numtypes*i
for i in xrange(1,P+1):
tt=[]
for j in xrange(0, numtypes):
pitype=types[j]+numtypes*(i-1)
tt.append(pitype)
#print i, "mapped", tt, " to ", iVerletLists[i]
vvl.mapTypeToVerletList(tt, iVerletLists[1])
system.storage.addParticles(piParticles, *props)
#print "1 PYTHON IMG 1947", system.storage.getParticle(1947).pos, system.storage.getParticle(1947).imageBox
#print "RINGIDS", ringids
# store each ring in a FixedTupleList
if not disableVVL:
vParticles=[]
vptype=numtypes*(P+1)+1 # this is the type assigned to virtual particles
for k, v in ringids.iteritems():
cog=allParticlesById[k][propDict['pos']]
for pid in v:
cog=cog+allParticlesById[k][propDict['pos']]
cog=cog/(len(v)+1)
#create a virtual particle for each ring
vpprops = ['id', 'pos', 'v', 'type', 'mass', 'q']
vpid=len(allParticles)+len(piParticles)+len(vParticles)+1
part = [vpid ,cog,Real3D(0, 0, 0), vptype, 0, 0]
vParticles.append(part)
# first item in tuple is the virtual particle id:
t=[vpid]
t.append(k)
t=t+v
vptuples.append(t)
#print "VPARTICLE", part, "TUPLE", t
system.storage.addParticles(vParticles, *vpprops)
        #always decompose before adding tuples
system.storage.decompose()
for t in vptuples:
ftpl.addTuple(t)
extVP = espressopp.integrator.ExtVirtualParticles(system, vcl)
extVP.addVirtualParticleTypes([vptype])
extVP.setFixedTupleList(ftpl)
integrator.addExtension(extVP)
# expand non-bonded potentials
numInteraction=system.getNumberOfInteractions()
for n in xrange(numInteraction):
interaction=system.getInteraction(n)
## TODO: in case of VVL: clone interaction, add potential!
print "expanding interaction", interaction
if interaction.bondType() == espressopp.interaction.Nonbonded:
for i in xrange(P):
for j in xrange(numtypes):
for k in xrange(numtypes):
pot=interaction.getPotential(j, k)
interaction.setPotential(numtypes*i+j, numtypes*i+k, pot)
print "Interaction", numtypes*i+j, numtypes*i+k, pot
if not disableVVL:
vl=interaction.getVerletList()
#print "VL has", vl.totalSize(),"disconnecting"
vl.disconnect()
interaction.setVerletList(iVerletLists[1])
if interaction.bondType() == espressopp.interaction.Pair:
bond_fpl=interaction.getFixedPairList()
cla_bonds=[]
# loop over bond lists returned by each cpu
for l in bond_fpl.getBonds():
cla_bonds.extend(l)
#print "CLA BONDS", bond_fpl.size()
for i in xrange(1, P):
tmp=0
for b in cla_bonds:
# create additional bonds for this imag time
bond_fpl.add(b[0]+num_cla_part*i, b[1]+num_cla_part*i)
tmp+=1
#print "trying to add", tmp, "bonds"
#print "i=", i, " PI BONDS", bond_fpl.size()
if interaction.bondType() == espressopp.interaction.Angular:
angle_ftl=interaction.getFixedTripleList()
# loop over triple lists returned by each cpu
cla_angles=[]
for l in angle_ftl.getTriples():
cla_angles.extend(l)
#print "CLA_ANGLES", cla_angles
for i in xrange(1, P):
for a in cla_angles:
# create additional angles for this imag time
angle_ftl.add(a[0]+num_cla_part*i,
a[1]+num_cla_part*i, a[2]+num_cla_part*i)
if interaction.bondType() == espressopp.interaction.Dihedral:
dihedral_fql=interaction.getFixedQuadrupleList()
cla_dihedrals=[]
for l in dihedral_fql.getQuadruples():
cla_dihedrals.extend(l)
for i in xrange(1, P):
for d in cla_dihedrals:
# create additional dihedrals for this imag time
dihedral_fql.add(d[0]+num_cla_part*i,
d[1]+num_cla_part*i, d[2]+num_cla_part*i, d[3]+num_cla_part*i)
piexcl=[]
for i in xrange(1, P):
for e in exclusions:
# create additional exclusions for this imag time
piexcl.append((e[0]+num_cla_part*i, e[1]+num_cla_part*i))
exclusions.extend(piexcl)
if not disableVVL:
vvl.exclude(exclusions)
    # now we count how many unique masses are in the system, as we have to create a harmonic spring interaction for each of them
unique_masses=[]
for p in allParticles:
mass=p[propDict['mass']]
if not mass in unique_masses:
unique_masses.append(mass)
kineticTermInteractions={} # key: mass value: corresponding harmonic spring interaction
for m in unique_masses:
fpl=espressopp.FixedPairList(system.storage)
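        # Spring constant between adjacent beads of the ring polymer
        # (temperature in energy units, so k_B is absorbed):
        # k = m * P^2 * T^2 / hbar^2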
k=m*P*P*langevin.temperature*langevin.temperature/(hbar*hbar)
pot=espressopp.interaction.Harmonic(k,0.0)
interb = espressopp.interaction.FixedPairListHarmonic(system, fpl, pot)
system.addInteraction(interb)
kineticTermInteractions.update({m:interb})
for idcla, idpi in ringids.iteritems():
p=allParticlesById[idcla]
mass=p[propDict['mass']]
interactionList=kineticTermInteractions[mass].getFixedPairList() #find the appropriate interaction based on the mass
# harmonic spring between atom at imag-time i and imag-time i+1
for i in xrange(len(idpi)-1):
interactionList.add(idpi[i],idpi[i+1])
#close the ring
interactionList.add(idcla,idpi[0])
interactionList.add(idcla,idpi[len(idpi)-1])
# instead of scaling the potentials, we scale the temperature!
langevin.temperature = langevin.temperature*P
if not disableVVL:
return iVerletLists
|
kkreis/espressopp
|
src/tools/pathintegral.py
|
Python
|
gpl-3.0
| 9,666
|
[
"ESPResSo",
"Gromacs"
] |
92a5e98913b5ea4daba95c775780b97e22ee135f7df76d2e047f91d07904b19d
|
#!/home/brian/anaconda3/bin/python3
'''
I work on this in my free time, but I can't take all the credit.
If it weren't for Ethan and Jacob, I would have stopped working on this long ago.
So thank you guys for keeping me interested in this, and coming up with some of the more fun characters.
'''
import subprocess as sp #clear()
from time import sleep
import sys #detect if OS is Windows or Linux
from random import randint,random,choice #random chance things
from element_game_stats import * #everyone's stats
from os import listdir
opponents = []
for file in listdir('.'):
if '_opponent.gaf' in file:
opponents.append(file)
if opponents == []:
print('ERROR: no opponent files found')
exit()
if sys.platform == 'linux':
def clear():
tmp = sp.call('clear',shell=True)
else:
def clear():
tmp = sp.call('cls',shell=True)
def potterSpellCast(selectedPotterSpell,HP,EP,opponentDisarmed,opponentHP): #allows for different spells per battle
if selectedPotterSpell == 0:
if EP >= 10:
HP = int(1.5*HP)
EP = EP - 10
print('You cast Episkey!')
print('Your HP is now:',HP)
print()
else:
print('You don\'t have enough energy to cast Episkey!')
print()
elif selectedPotterSpell == 1:
opponentHP = int(opponentHP - .8*EP)
EP = 0
print('AVADA KEDAVRA!!!')
    # return the updated values, including the opponentDisarmed parameter
    return dict(zip(['EP','opponentHP','HP','opponentDisarmed'],[EP,opponentHP,HP,opponentDisarmed]))
#########################################################################
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _Getch()
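# Example (illustrative): read a single keypress without echoing it
#     key = getch()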
#########################################################################
elements = [ #the main menu prints from this list, and it checks this list for the character you select
'air',
'water',
'earth',
'fire',
'clone trooper',
'narrator',
'jedi',
'walle',
'potter'
]
while True:
statusEffects={ #reset things between battles
'opponentPoisoned':False,
'opponentSinkHole':0,
'opponentAsleep':False,
'forceBlock':False,
'opponentDisarmed':0
}
earthQuakeValue = 0
plasmaCooldown = 3
clonesOnSite = 0
electroSnakeLife = 0
clear()
print('choose an element') #display characters
for x in elements:
print(x.title())
print('Settings')
print()
playerElement = str(input()).lower() #not case sensitive
clear()
if playerElement == elements[0]: #info screen
HP = airStats['hpPoints']
EP = airStats['epPoints']
clear()
print('You have chosen Air!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',airStats['epRegenChance'])
print('EP regen:',airStats['epRegenCount'])
print()
for ability in airStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True: #fighting sequence
sequence = 'battle'
if randint(1,100) <= airStats['epRegenChance']: #whether or not you'll regain EP
EP = EP + airStats['epRegenCount'] #how much EP you'll regain
if opponentHP < 1:
print('You win!')
input()
sequence = 'lost'
break
if HP < 1:
print('You lose!')
input()
sequence = 'won'
break
clear()
print(opponentName) #enemy status
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP) #your status
print('EP:',EP)
print()
for ability in range(len(airStats['abilities'])): #print moveset
y = str(ability+1)+')'
print(y,airStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
if EP >= 15: #check if you have enough EP
opponentHP = opponentHP - 10 #damage
EP = EP - 15 #EP cost
print('You used Category 1 wind!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Category 1 wind!')
print()
elif str(attack) == '2':
if EP >= 30:
opponentHP = opponentHP - 25
EP = EP - 30
print('You used Tornado!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Tornado!')
print()
elif str(attack) == '3':
if EP >= 10:
opponentHP = opponentHP - 5
EP = EP - 10
print('You used Updraft!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Updraft!')
print()
elif str(attack) == '4':
if EP >= 25:
opponentEP = opponentEP - randint(0,20)
EP = EP - 25
HP = HP + randint(10,30) #health regen
print('You used Vortex Blast!')
print(opponentName+'\'s EP is down to',opponentEP)
print()
else:
print('You don\'t have enough energy to use Vortex Blast!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage #take damage
elif playerElement == elements[1]:
HP = waterStats['hpPoints']
EP = waterStats['epPoints']
clear()
print('You have chosen Water!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',waterStats['epRegenChance'])
print('EP regen:',waterStats['epRegenCount'])
print()
print('Abilities')
for ability in waterStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= waterStats['epRegenChance']:
EP = EP + waterStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(waterStats['abilities'])):
y = str(ability+1)+')'
print(y,waterStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
if EP >= 30:
opponentHP = opponentHP - randint(20,30)
EP = EP - 30
print('You used Water Spout!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Water Spout!')
print()
elif str(attack) == '2':
if EP >= 15:
opponentHP = opponentHP - 15
EP = EP - 15
print('You used Quick Sand!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Quick Sand!')
print()
elif str(attack) == '3':
if EP >= 50:
statusEffects['opponentPoisoned'] = True #inflict a status effect <sarcasm>definitely not overpowered</sarcasm>
EP = EP - 50
print('You used Poison!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Poison!')
print()
elif str(attack) == '4':
WinterBlastCost = int(input('Cost: ')) #FANCY
if EP >= WinterBlastCost:
WinterBlastDamage = WinterBlastCost // 2 + 5
opponentHP = opponentHP - WinterBlastDamage
EP = EP - WinterBlastCost
print('You used Winter Blast!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
                else:
                    print('You don\'t have enough energy to use Winter Blast!')
                    print()
            else:
                print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
elif playerElement == elements[2]:
HP = earthStats['hpPoints']
EP = earthStats['epPoints']
clear()
print('You have chosen Earth')
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',earthStats['epRegenChance'])
        print('EP regen:',earthStats['epRegenCount'])
print()
print('Abilities')
for ability in earthStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= earthStats['epRegenChance']:
EP = EP + earthStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(earthStats['abilities'])):
y = str(ability+1)+')'
print(y,earthStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
earthQuakeValue = randint(0,50)
if EP >= 30:
opponentHP = opponentHP - earthQuakeValue
EP = EP - earthQuakeValue
print('You used Earth Quake Level VII')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Earth Quake Level VII')
print()
elif str(attack) == '2':
if EP >= 15:
statusEffects['opponentSinkHole'] = 4 #status effect with a duration
EP = EP - 15
print('You used Sink Hole')
print(opponentName+'\'s EP is down to',opponentEP)
print()
else:
print('You don\'t have enough energy to use Sink Hole!')
print()
elif str(attack) == '3':
if EP >= 30:
opponentHP = opponentHP - 20
EP = EP - 30
print('You used Metal Shot!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Metal Shot!')
print()
elif str(attack) == '4':
if EP >= 15:
opponentHP = opponentHP - 10
EP = EP - 15
print('You used Fissure!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
                    print('You don\'t have enough energy to use Fissure!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
elif playerElement == elements[3]:
HP = fireStats['hpPoints']
EP = fireStats['epPoints']
clear()
print('You have chosen Fire!')
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',fireStats['epRegenChance'])
print('EP regen:',fireStats['epRegenCount'])
print()
print('Abilities')
for ability in fireStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= fireStats['epRegenChance']:
EP = EP + fireStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(fireStats['abilities'])):
y = str(ability+1)+')'
print(y,fireStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
logs = int(input('How many logs? '))
refuelCost = logs * randint(5,10)
refuelGain = logs * randint(10,20)
                if EP >= refuelCost:
                    print('You used Refuel!')
                    EP = EP - refuelCost
                    HP = HP + refuelGain
                    print('Your HP is:',HP)
print()
else:
print('You don\'t have enough energy to use Refuel!')
print()
elif str(attack) == '2':
if EP >= 30:
opponentHP = opponentHP - randint(10,30)
EP = EP - randint(20,30)
print('You used Fire Devil!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Fire Devil!')
print()
elif str(attack) == '3':
                if EP >= 50:
opponentHP = opponentHP - randint(30,50)
EP = EP - 50
print('You used Lightning Strike!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use Lightning Strike!')
print()
elif str(attack) == '4':
if EP >= 25:
opponentEP = opponentEP - randint(20,45)
EP = EP - randint(25,45)
print('You used Lava Flow!')
                    print(opponentName+'\'s EP is down to',opponentEP)
print()
else:
print('You don\'t have enough energy to use Lava Flow!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
elif playerElement == elements[4]:
HP = cloneStats['hpPoints']
EP = cloneStats['epPoints']
clear()
print('You have chosen Clone Trooper!') #Ethan would approve
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',cloneStats['epRegenChance'])
print('EP regen:',cloneStats['epRegenCount'])
print()
print('Abilities:')
for ability in cloneStats['abilities']:
print(ability)
print()
input()
        sequence = 'init'
        opponent = choice(opponents)
        opponentAction = open('./'+opponent,'r').read()
        exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= cloneStats['epRegenChance']:
EP = EP + cloneStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
if plasmaCooldown != 0:
plasmaCooldown = plasmaCooldown - 1
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print('Clones under your command:',clonesOnSite)
print()
for ability in range(len(cloneStats['abilities'])):
y = str(ability+1)+')'
print(y,cloneStats['abilities'][ability])
attack = getch()
if attack == '1': #unique attack that creates decoys
if EP >= 5:
EP = EP - 5
clonesOnSite = clonesOnSite + 2
print('You called in 2 clones!')
print()
else:
print('There are no clones on call!')
print()
elif attack == '2':
if EP >= 5:
opponentHP = opponentHP - 5
EP = EP - 5
print('You shot '+opponentName+'!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough ammo to shoot '+opponentName+'!')
print()
elif attack == '3':
if EP >= 20:
opponentHP = opponentHP - randint(40,60)
EP = EP - 20
print('You Dropped a nuke on '+opponentName+'!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have any Nukes')
print()
elif attack == '4':
if EP >= 10:
opponentHP = opponentHP - randint(10,25)
EP = EP - 10
print('You used electro whip!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('Your electro whip is out of power!')
print()
elif attack == '5': #unique attack that spawns a "sidekick" of sorts
if EP >= 15:
EP = EP - 15
electroSnakeLife = randint(4,6)
print('You unleashed your electro snake!')
print()
else:
print('Your electro snake is recharging!')
print()
elif attack == '6': #I just, just... Not much to say about this... Ethan likes it
if plasmaCooldown == 0:
if EP >= 70:
EP = EP - 70
opponentHP = opponentHP - randint(70,100) # O_O
print('You shot '+opponentName+' with a plasma ball!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('Your plasma blaster is being reloaded!')
print()
else:
print('The plasma blaster is still being built') #this is the only thing that adds some balance (and not very much at that)
print()
else:
print(attack,'is not an attack')
if clonesOnSite == 0:
exec(opponentAction)
HP = HP - damage
else: #decoy
clonesOnSite = clonesOnSite - 1
print(opponentName+' shot one of your clone troopers!')
input()
if electroSnakeLife > 0: #Electro Snake attack
electroSnakeLife = electroSnakeLife - 1
opponentHP = opponentHP - randint(20,40)
print('Electro Snake shocked '+opponentName+'!')
print(opponentName+'\'s HP is down to',opponentHP)
input()
elif playerElement == elements[5]:#<humor>
HP = narratorStats['hpPoints']
EP = narratorStats['epPoints']
print('The Narrator prepares to speak in the third person!')
sleep(1)
clear()
print('The Narrator has been selected!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',narratorStats['epRegenChance'])
print('EP regen:',narratorStats['epRegenCount'])
print()
print('Abilities')
for ability in narratorStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= narratorStats['epRegenChance']:
EP = EP + narratorStats['epRegenCount']
if opponentHP < 1:
print('The Narrator won!')
input()
print('The Narrator stops speaking in the third person')
sleep(1)
break
if HP < 1:
print('The Narrator lost!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(narratorStats['abilities'])):
y = str(ability+1)+')'
print(y,narratorStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
if EP >= 15:
opponentHP = opponentHP - 15
EP = EP - 15
print('The Narrator told a boring story!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('The Narrator doesn\'t have enough energy to tell a boring story!')
print()
elif str(attack) == '2':
if EP >= 30:
statusEffects['opponentAsleep'] = True #status effect
EP = EP - 30
print('The Narrator gave a mathematics lecture!')
print(opponentName+' fell asleep')
print()
else:
print('The Narrator doesn\'t have enough energy to give a mathematics lecture!')
print()
elif str(attack) == '3':
if EP >= 10:
opponentEP = opponentEP - 5
EP = EP - 10
print('The Narrator told a sermon!')
print(opponentName+' feels guilty...')
print()
else:
print('The Narrator doesn\'t have enough energy to tell a sermon!')
print()
elif str(attack) == '4':
if EP >= 25:
EP = EP - 25
HP = HP + randint(10,15)
print('The Narrator told a relaxing tale!')
print('The Narrator\'s HP is now',HP)
print()
else:
print('The Narrator doesn\'t have enough energy to tell a relaxing tale!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
input() #</humor>
elif playerElement == elements[6]: #Ethan and Jacob approve
HP = jediStats['hpPoints']
EP = jediStats['epPoints']
clear()
print('You have chosen Jedi!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',jediStats['epRegenChance'])
print('EP regen:',jediStats['epRegenCount'])
print()
for ability in jediStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= jediStats['epRegenChance']:
EP = EP + jediStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(jediStats['abilities'])):
y = str(ability+1)+')'
print(y,jediStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
if EP >= 10:
opponentHP = opponentHP - 10
EP = EP - 10
print('You used force push!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use force push!')
print()
elif str(attack) == '2':
if EP >= 5:
opponentHP = opponentHP - 10
EP = EP - 5
print('You used force ball!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use force ball!')
print()
elif str(attack) == '3':
if EP >= 10:
opponentHP = opponentHP - 20
EP = EP - 10
print('You used your lightsaber!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use your lightsaber!')
print()
elif str(attack) == '4':
if EP >= 10:
EP = EP - 10
statusEffects['forceBlock'] = True #self inflicted status effect
print('You used force block!')
print()
else:
print('You don\'t have enough energy to use force block!')
print()
else:
print(attack,'is not an attack')
            exec(opponentAction)
            if statusEffects['forceBlock'] == True: #minimize damage before applying it
                damage = round(damage*.3,0)
                statusEffects['forceBlock'] = False
            HP = HP - damage
if randint(1,100) <= 40: #slightly overpowered...
print('Your Padawan attacked '+opponentName+'!')
opponentHP = opponentHP - 15
input()
elif playerElement == elements[7]: #Jacob approves
HP = walleStats['hpPoints']
EP = walleStats['epPoints']
clear()
print('You have chosen WALLⒺ!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:',walleStats['epRegenChance'])
print('EP regen:',walleStats['epRegenCount'])
print()
for ability in walleStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(1,100) <= walleStats['epRegenChance']:
EP = EP + walleStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(walleStats['abilities'])):
y = str(ability+1)+')'
print(y,walleStats['abilities'][ability])
print()
attack = getch()
if str(attack) == '1':
if EP >= 5:
opponentHP = opponentHP - 10
EP = EP - 5
print('You used your laser!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use your laser!')
print()
elif str(attack) == '2':
if EP >= 20:
opponentHP = opponentHP - 30
EP = EP - 20
print('You used your Trash Compactor!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use your Trash Compactor!')
print()
elif str(attack) == '3':
if EP >= 30:
opponentHP = opponentHP - 5 # don't
EP = EP - 30 # ask
print('You used EVE\'s Blaster!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to use EVE\'s Blaster!')
print()
elif str(attack) == '4':
if EP >= 2:
opponentHP = opponentHP - 10
EP = EP - 2
print('You used Cute Eyes!')
                    print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to look cute!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
elif playerElement == elements[8]:
HP = potterStats['hpPoints']
EP = potterStats['epPoints']
print('You have chosen The Boy Who Lived!')
print()
print('HP:',HP)
print('EP:',EP)
print('EP regen chance:'+str(potterStats['epRegenChance'])+'%')
print('EP regen:',potterStats['epRegenCount'])
print()
print('Abilities:')
for ability in potterStats['abilities']:
print(ability)
print()
input()
sequence = 'init'
opponent = choice(opponents)
opponentAction = open('./'+opponent,'r').read()
exec(opponentAction)
clear()
print('You are fighting '+opponentName+'!')
sleep(1)
while True:
sequence = 'battle'
if randint(0,100) <= potterStats['epRegenChance']:
EP = EP + potterStats['epRegenCount']
if opponentHP < 1:
print('You win!')
input()
break
if HP < 1:
print('You lose!')
input()
break
clear()
print(opponentName)
print('Enemy HP:',opponentHP)
print('Enemy EP:',opponentEP)
print()
print('HP:',HP)
print('EP:',EP)
print()
for ability in range(len(potterStats['abilities'])):
y = str(ability+1)+')'
print(y,potterStats['abilities'][ability])
print()
attack = input()
if str(attack) == '1': #selectable spell. batteries not included
spellResult = potterSpellCast(selectedPotterSpell,HP,EP,statusEffects['opponentDisarmed'],opponentHP)
EP,opponentHP,HP,statusEffects['opponentDisarmed'] = spellResult['EP'],spellResult['opponentHP'],spellResult['HP'],spellResult['opponentDisarmed']
elif str(attack) == '2':
if EP >= 10:
EP = EP - 10
print('You cast Expelliarmus!')
if randint(0,100) <= 50:
statusEffects['opponentDisarmed'] = statusEffects['opponentDisarmed'] + 2 #status effect
opponentAttack = 0
print(opponentName,'was disarmed!')
else:
print(opponentName,'countered!')
print()
else:
print('You don\'t have enough energy to cast Expelliarmus!')
print()
elif str(attack) == '3':
if EP >= 30:
opponentHP = int(random()*opponentHP) #this can hurt a bit...
EP = EP - 30
print('You cast Sectumsempra!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to cast Sectumsempra!')
print()
elif str(attack) == '4':
if EP >= 15:
EP = EP - 15
opponentHP = opponentHP - randint(15,20)
print('You cast Expulso!')
print(opponentName+'\'s HP is down to',opponentHP)
print()
else:
print('You don\'t have enough energy to cast Expulso!')
print()
else:
print(attack,'is not an attack')
exec(opponentAction)
HP = HP - damage
if randint(0,100) <= 10: #<sarcasm> This is definitely not overpowered. No not at all... </sarcasm>
HP = HP + 2*damage
EP = 10*EP
clear()
print('Dumbledore!')
input()
elif playerElement == 'settings':
clear()
print('Choose who to edit')
for x in range(0,len(elements)):
print(elements[x].title())
print()
playerElement = input()
clear()
if playerElement == elements[0]:
print('HP',airStats['hpPoints'],'default: 80')
print()
airStats['hpPoints'] = int(input())
clear()
print('EP',airStats['epPoints'],'default: 120')
print()
airStats['epPoints'] = int(input())
clear()
print('EP regen',airStats['epRegenCount'],'default: 10')
print()
airStats['epRegenCount'] = int(input())
clear()
elif playerElement == elements[1]:
print('HP',waterStats['hpPoints'],'default: 120')
print()
waterStats['hpPoints'] = int(input())
clear()
print('EP',waterStats['epPoints'],'default: 100')
print()
waterStats['epPoints'] = int(input())
clear()
print('EP regen',waterStats['epRegenCount'],'default: 15')
print()
waterStats['epRegenCount'] = int(input())
clear()
elif playerElement == elements[2]:
print('HP',earthStats['hpPoints'],'default: 150')
print()
earthStats['hpPoints'] = int(input())
clear()
print('EP',earthStats['epPoints'],'default: 100')
print()
earthStats['epPoints'] = int(input())
clear()
print('EP regen',earthStats['epRegenCount'],'default: 30')
print()
earthStats['epRegenCount'] = int(input())
clear()
elif playerElement == elements[3]:
print('HP',fireStats['hpPoints'],'default: 100')
print()
fireStats['hpPoints'] = int(input())
clear()
print('EP',fireStats['epPoints'],'default: 100')
print()
fireStats['epPoints'] = int(input())
clear()
print('EP regen',fireStats['epRegenCount'],'default: 50')
print()
fireStats['epRegenCount'] = int(input())
clear()
elif playerElement == elements[4]:
print('HP',cloneStats['hpPoints'],'default: 110')
print()
cloneStats['hpPoints'] = int(input())
clear()
print('EP',cloneStats['epPoints'],'default: 110')
print()
cloneStats['epPoints'] = int(input())
clear()
print('EP regen',cloneStats['epRegenCount'],'default: 10')
print()
cloneStats['epRegenCount'] = int(input())
clear()
elif playerElement == 'exit':
break
else:
clear()
print(playerElement,'is not a choice')
input()
|
Thurii/legendary-telegram
|
Merged Element Game.py
|
Python
|
cc0-1.0
| 30,639
|
[
"BLAST",
"Brian"
] |
fa6d6d5d8dc3f5bffa79435374685756b6c402c522278903445cb12d6faddfb8
|
import warnings
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
from scipy.interpolate import UnivariateSpline
from astropy import log
from astropy.table import Table
from stingray.lightcurve import Lightcurve
from ..crossspectrum import AveragedCrossspectrum, show_progress, get_flux_generator
from ..powerspectrum import AveragedPowerspectrum
from ..fourier import normalize_periodograms, fft, fftfreq, positive_fft_bins
from ..gti import cross_two_gtis, bin_intervals_from_gtis
__all__ = ["calculate_FAD_correction", "get_periodograms_from_FAD_results", "FAD"]
def FAD(
data1,
data2,
segment_size,
dt=None,
norm="frac",
plot=False,
ax=None,
smoothing_alg='gauss',
smoothing_length=None,
verbose=False,
tolerance=0.05,
strict=False,
output_file=None,
return_objects=False
):
"""Calculate Frequency Amplitude Difference-corrected (cross)power spectra.
Reference: Bachetti & Huppenkothen, 2018, ApJ, 853L, 21
The two input light curves must be strictly simultaneous, and recorded by
two independent detectors with similar responses, so that the count rates
are similar and dead time is independent.
The method does not apply to different energy channels of the same
instrument, or to the signal observed by two instruments with very
different responses. See the paper for caveats.
Parameters
----------
data1 : `Lightcurve` or `EventList`
Input data for channel 1
data2 : `Lightcurve` or `EventList`
Input data for channel 2. Must be strictly simultaneous to ``data1``
and, if a light curve, have the same binning time. Also, it must be
strictly independent, e.g. from a different detector. There must be
no dead time cross-talk between the two time series.
segment_size: float
The final Fourier products are averaged over many segments of the
input light curves. This is the length of each segment being averaged.
Note that the light curve must be long enough to have at least 30
segments, as the result gets better as one averages more and more
segments.
dt : float
Time resolution of the light curves used to produce periodograms
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``frac``
The normalization of the (real part of the) cross spectrum.
Other parameters
----------------
plot : bool, default False
Plot diagnostics: check if the smoothed Fourier difference scatter is
a good approximation of the data scatter.
ax : :class:`matplotlib.axes.Axes` object
If not None and ``plot`` is True, use this axis object to produce
the diagnostic plot. Otherwise, create a new figure.
smoothing_alg : {'gauss', ...}
Smoothing algorithm. For now, the only smoothing algorithm allowed is
``gauss``, which applies a Gaussian Filter from `scipy`.
smoothing_length : int, default ``segment_size * 3``
Number of bins to smooth in gaussian window smoothing
verbose: bool, default False
Print out information on the outcome of the algorithm (recommended)
tolerance : float, default 0.05
Accepted relative error on the FAD-corrected Fourier amplitude, to be
used as success diagnostics.
Should be
```
stdtheor = 2 / np.sqrt(n)
std = (average_corrected_fourier_diff / n).std()
np.abs((std - stdtheor) / stdtheor) < tolerance
```
strict : bool, default False
Decide what to do if the condition on tolerance is not met. If True,
raise a ``RuntimeError``. If False, just throw a warning.
output_file : str, default None
Name of an output file (any extension automatically recognized by
Astropy is fine)
Returns
-------
results : :class:`astropy.table.Table` object or ``dict`` or ``str``
The content of ``results`` depends on whether ``return_objects`` is
True or False.
If ``return_objects==False``,
``results`` is a `Table` with the following columns:
+ pds1: the corrected PDS of ``lc1``
+ pds2: the corrected PDS of ``lc2``
+ cs: the corrected cospectrum
+ ptot: the corrected PDS of lc1 + lc2
If ``return_objects`` is True, ``results`` is a ``dict``, with keys
named like the columns
listed above but with `AveragePowerspectrum` or
`AverageCrossspectrum` objects instead of arrays.
"""
gti = cross_two_gtis(data1.gti, data2.gti)
data1.gti = data2.gti = gti
if isinstance(data1, Lightcurve):
dt = data1.dt
flux_iterable1 = get_flux_generator(data1, segment_size, dt=dt)
flux_iterable2 = get_flux_generator(data2, segment_size, dt=dt)
# Initialize stuff
freq = None
# These will be the final averaged periodograms. Initializing with a single
# scalar 0, but the final products will be arrays.
pds1 = 0
pds2 = 0
ptot = 0
cs = 0
M = 0
nph1_tot = nph2_tot = nph_tot = 0
average_diff = average_diff_uncorr = 0
if plot:
if ax is None:
fig, ax = plt.subplots()
for flux1, flux2 in show_progress(zip(flux_iterable1, flux_iterable2)):
if flux1 is None or flux2 is None:
continue
N = flux1.size
segment_size = N * dt
if smoothing_length is None:
smoothing_length = segment_size * 3
if freq is None:
fgt0 = positive_fft_bins(N)
freq = fftfreq(N, dt)[fgt0]
# Calculate the sum of each light curve: these photon counts set the mean
# rate and enter the Leahy normalization below
nph1 = flux1.sum()
nph2 = flux2.sum()
nphtot = nph1 + nph2
# Calculate the FFTs
f1 = fft(flux1)[fgt0]
f2 = fft(flux2)[fgt0]
ftot = fft(flux1 + flux2)[fgt0]
f1_leahy = f1 * np.sqrt(2 / nph1)
f2_leahy = f2 * np.sqrt(2 / nph2)
ftot_leahy = ftot * np.sqrt(2 / nphtot)
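# With this Leahy-style normalization, pure Poisson noise gives unit
# variance in the real and imaginary parts of each FFT, so the difference
# of two independent channels has variance 2; the sqrt(2) factor applied
# to the corrected difference further below restores that scale.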
fourier_diff = f1_leahy - f2_leahy
if plot:
ax.scatter(freq, fourier_diff.real, s=1)
if smoothing_alg == 'gauss':
smooth_real = gaussian_filter1d(fourier_diff.real ** 2,
smoothing_length)
else:
raise ValueError("Unknown smoothing algorithm: {}".format(
smoothing_alg))
p1 = (f1 * f1.conj()).real
p1 = p1 / smooth_real * 2
p2 = (f2 * f2.conj()).real
p2 = p2 / smooth_real * 2
pt = (ftot * ftot.conj()).real
pt = pt / smooth_real * 2
c = (f2 * f1.conj()).real
c = c / smooth_real * 2
nphgeom = np.sqrt(nph1 * nph2)
power1 = normalize_periodograms(p1, dt, N, nph1 / N, n_ph=nph1, norm=norm)
power2 = normalize_periodograms(p2, dt, N, nph2 / N, n_ph=nph2, norm=norm)
power_tot = normalize_periodograms(pt, dt, N, nphtot / N, n_ph=nphtot, norm=norm)
cs_power = normalize_periodograms(c, dt, N, nphgeom / N, n_ph=nphgeom, norm=norm)
if M == 0 and plot:
ax.plot(freq, smooth_real, zorder=10, lw=3)
ax.plot(freq, f1_leahy.real, zorder=5, lw=1)
ax.plot(freq, f2_leahy.real, zorder=5, lw=1)
ptot += power_tot
pds1 += power1
pds2 += power2
cs += cs_power
average_diff += fourier_diff / smooth_real ** 0.5 * np.sqrt(2)
average_diff_uncorr += fourier_diff
nph1_tot += nph1
nph2_tot += nph2
nph_tot += nphtot
M += 1
std = (average_diff / M).std()
stdtheor = 2 / np.sqrt(M)
stduncorr = (average_diff_uncorr / M).std()
is_compliant = np.abs((std - stdtheor) / stdtheor) < tolerance
verbose_string = \
'''
-------- FAD correction ----------
I smoothed over {smoothing_length} power spectral bins
{M} intervals averaged.
The uncorrected standard deviation of the Fourier
differences is {stduncorr} (dead-time affected!)
The final standard deviation of the FAD-corrected
Fourier differences is {std}. For the results to be
acceptable, this should be close to {stdtheor}
to within {tolerance} %.
In this case, the results ARE {compl}compliant.
{additional}
----------------------------------
'''.format(smoothing_length=smoothing_length,
M=M,
stduncorr=stduncorr,
std=std,
stdtheor=stdtheor,
tolerance=tolerance * 100,
compl='NOT ' if not is_compliant else '',
additional='Maybe something is not right.' if not is_compliant else '')
if verbose and is_compliant:
log.info(verbose_string)
elif not is_compliant:
warnings.warn(verbose_string)
if strict and not is_compliant:
raise RuntimeError('Results are not compliant, and `strict` mode '
'selected. Exiting.')
results = Table()
results['freq'] = freq
results['pds1'] = pds1 / M
results['pds2'] = pds2 / M
results['cs'] = cs / M
results['ptot'] = ptot / M
results['fad'] = average_diff / M
results.meta['fad_delta'] = (std - stdtheor) / stdtheor
results.meta['is_compliant'] = is_compliant
results.meta['M'] = M
results.meta['dt'] = dt
results.meta['nph1'] = nph1_tot / M
results.meta['nph2'] = nph2_tot / M
results.meta['nph'] = nph_tot / M
results.meta['norm'] = norm
results.meta['smoothing_length'] = smoothing_length
results.meta['df'] = np.mean(np.diff(freq))
if output_file is not None:
results.write(output_file, overwrite=True)
if return_objects:
result_table = results
results = {}
results['pds1'] = \
get_periodograms_from_FAD_results(result_table, kind='pds1')
results['pds2'] = \
get_periodograms_from_FAD_results(result_table, kind='pds2')
results['cs'] = \
get_periodograms_from_FAD_results(result_table, kind='cs')
results['ptot'] = \
get_periodograms_from_FAD_results(result_table, kind='ptot')
results['fad'] = result_table['fad']
return results
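# Minimal usage sketch (illustrative only; ``lc1`` and ``lc2`` stand for two
# strictly simultaneous, independent stingray Lightcurve objects):
#
#     results = FAD(lc1, lc2, segment_size=256.0, norm="leahy", verbose=True)
#     freq = results["freq"]
#     pds_tot = results["ptot"]  # dead-time-corrected PDS of lc1 + lc2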
def calculate_FAD_correction(lc1, lc2, segment_size, norm="frac", gti=None,
plot=False, ax=None, smoothing_alg='gauss',
smoothing_length=None, verbose=False,
tolerance=0.05, strict=False,
output_file=None, return_objects=False):
"""Calculate Frequency Amplitude Difference-corrected (cross)power spectra.
Reference: Bachetti & Huppenkothen, 2018, ApJ, 853L, 21
The two input light curves must be strictly simultaneous, and recorded by
two independent detectors with similar responses, so that the count rates
are similar and dead time is independent.
The method does not apply to different energy channels of the same
instrument, or to the signal observed by two instruments with very
different responses. See the paper for caveats.
Parameters
----------
lc1 : :class:`stingray.lightcurve.Lightcurve`
Light curve from channel 1
lc2 : :class:`stingray.lightcurve.Lightcurve`
Light curve from channel 2. Must be strictly simultaneous to ``lc1``
and have the same binning time. Also, it must be strictly independent,
e.g. from a different detector. There must be no dead time cross-talk
between the two light curves.
segment_size: float
The final Fourier products are averaged over many segments of the
input light curves. This is the length of each segment being averaged.
Note that the light curve must be long enough to have at least 30
segments, as the result gets better as one averages more and more
segments.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``frac``
The normalization of the (real part of the) cross spectrum.
Other parameters
----------------
plot : bool, default False
Plot diagnostics: check if the smoothed Fourier difference scatter is
a good approximation of the data scatter.
ax : :class:`matplotlib.axes.Axes` object
If not None and ``plot`` is True, use this axis object to produce
the diagnostic plot. Otherwise, create a new figure.
smoothing_alg : {'gauss', ...}
Smoothing algorithm. For now, the only smoothing algorithm allowed is
``gauss``, which applies a Gaussian Filter from `scipy`.
smoothing_length : int, default ``segment_size * 3``
Number of bins to smooth in gaussian window smoothing
verbose: bool, default False
Print out information on the outcome of the algorithm (recommended)
tolerance : float, default 0.05
Accepted relative error on the FAD-corrected Fourier amplitude, to be
used as success diagnostics.
Should be
```
stdtheor = 2 / np.sqrt(n)
std = (average_corrected_fourier_diff / n).std()
np.abs((std - stdtheor) / stdtheor) < tolerance
```
strict : bool, default False
Decide what to do if the condition on tolerance is not met. If True,
raise a ``RuntimeError``. If False, just throw a warning.
output_file : str, default None
Name of an output file (any extension automatically recognized by
Astropy is fine)
Returns
-------
results : :class:`astropy.table.Table` object or ``dict`` or ``str``
The content of ``results`` depends on whether ``return_objects`` is
True or False.
If ``return_objects==False``,
``results`` is a `Table` with the following columns:
+ pds1: the corrected PDS of ``lc1``
+ pds2: the corrected PDS of ``lc2``
+ cs: the corrected cospectrum
+ ptot: the corrected PDS of lc1 + lc2
If ``return_objects`` is True, ``results`` is a ``dict``, with keys
named like the columns
listed above but with `AveragePowerspectrum` or
`AverageCrossspectrum` objects instead of arrays.
"""
return FAD(
lc1,
lc2,
segment_size,
dt=lc1.dt,
norm=norm,
plot=plot,
ax=ax,
smoothing_alg=smoothing_alg,
smoothing_length=smoothing_length,
verbose=verbose,
tolerance=tolerance,
strict=strict,
output_file=output_file,
return_objects=return_objects
)
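# Hedged example of the wrapper above (``lc1``/``lc2`` are placeholder
# Lightcurves); with ``return_objects=True`` the corrected products come back
# as Stingray spectral objects instead of Table columns:
#
#     objs = calculate_FAD_correction(lc1, lc2, segment_size=256.0,
#                                     norm="leahy", return_objects=True)
#     objs["ptot"].power  # AveragedPowerspectrum of the summed light curve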
def get_periodograms_from_FAD_results(FAD_results, kind='ptot'):
"""Get Stingray periodograms from FAD results.
Parameters
----------
FAD_results : :class:`astropy.table.Table` object or `str`
Results from `calculate_FAD_correction`, either as a Table or an output
file name
kind : :class:`str`, one of ['ptot', 'pds1', 'pds2', 'cs']
Kind of periodogram to get (E.g., 'ptot' -> PDS from the sum of the two
light curves, 'cs' -> cospectrum, etc.)
Returns
-------
results : `AveragedCrossspectrum` or `AveragedPowerspectrum` object
The periodogram.
"""
if isinstance(FAD_results, str):
FAD_results = Table.read(FAD_results)
if kind.startswith('p') and kind in FAD_results.colnames:
powersp = AveragedPowerspectrum()
powersp.nphot = FAD_results.meta['nph']
if '1' in kind:
powersp.nphots = FAD_results.meta['nph1']
elif '2' in kind:
powersp.nphots = FAD_results.meta['nph2']
elif kind == 'cs':
powersp = AveragedCrossspectrum(power_type='all')
powersp.nphots1 = FAD_results.meta['nph1']
powersp.nphots2 = FAD_results.meta['nph2']
else:
raise ValueError("Unknown periodogram type")
powersp.freq = FAD_results['freq']
powersp.power = FAD_results[kind]
powersp.power_err = np.zeros_like(powersp.power)
powersp.m = FAD_results.meta['M']
powersp.df = FAD_results.meta['df']
powersp.dt = FAD_results.meta['dt']
powersp.n = len(powersp.freq) * 2
powersp.norm = FAD_results.meta['norm']
return powersp
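# Sketch (``fad_table`` is assumed to be a Table returned by ``FAD``, or the
# name of a file it was written to):
#
#     ps_tot = get_periodograms_from_FAD_results(fad_table, kind='ptot')
#     cospec = get_periodograms_from_FAD_results(fad_table, kind='cs')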
|
StingraySoftware/stingray
|
stingray/deadtime/fad.py
|
Python
|
mit
| 16,305
|
[
"Gaussian"
] |
962216bf36764fbd158196cb7f3fbf69c760ddb6283adc193912506dea2a1bc0
|
# Copyright (c) 2017, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import os, sys, copy
from rdkit.Chem import rdFreeSASA
from rdkit import Chem
expected = [
[0, 1, "Polar", 1.64], [1, 0, "Apolar", 1.88],
[2, 0, "Apolar", 1.61], [3, 1, "Polar", 1.42],
[4, 0, "Apolar", 1.88], [5, 0, "Apolar", 1.88],
[6, 1, "Polar", 1.77], [7, 0, "Apolar", 1.88],
[8, 1, "Polar", 1.64], [9, 0, "Apolar", 1.88],
[10, 0, "Apolar", 1.61], [11, 1, "Polar", 1.42],
[12, 0, "Apolar", 1.88], [13, 0, "Apolar", 1.88],
[14, 0, "Apolar", 1.61], [15, 1, "Polar", 1.42],
[16, 1, "Polar", 1.64], [17, 1, "Polar", 1.64],
[18, 0, "Apolar", 1.88], [19, 0, "Apolar", 1.61],
[20, 1, "Polar", 1.42], [21, 0, "Apolar", 1.88],
[22, 0, "Apolar", 1.88], [23, 0, "Apolar", 1.88],
[24, 0, "Apolar", 1.88], [25, 1, "Polar", 1.64],
[26, 0, "Apolar", 1.88], [27, 0, "Apolar", 1.61],
[28, 1, "Polar", 1.42], [29, 0, "Apolar", 1.88],
[30, 0, "Apolar", 1.61], [31, 0, "Apolar", 1.76],
[32, 0, "Apolar", 1.76], [33, 0, "Apolar", 1.76],
[34, 0, "Apolar", 1.76], [35, 0, "Apolar", 1.76],
[36, 1, "Polar", 1.64], [37, 0, "Apolar", 1.88],
[38, 0, "Apolar", 1.61], [39, 1, "Polar", 1.42],
[40, 0, "Apolar", 1.88], [41, 0, "Apolar", 1.88],
[42, 0, "Apolar", 1.88], [43, 1, "Polar", 1.64],
[44, 0, "Apolar", 1.88], [45, 0, "Apolar", 1.61],
[46, 1, "Polar", 1.42], [47, 0, "Apolar", 1.88],
[48, 0, "Apolar", 1.88], [49, 0, "Apolar", 1.88],
[50, 0, "Apolar", 1.88], [51, 1, "Polar", 1.64],
[52, 1, "Polar", 1.64], [53, 0, "Apolar", 1.88],
[54, 0, "Apolar", 1.61], [55, 1, "Polar", 1.42],
[56, 0, "Apolar", 1.88], [57, 1, "Polar", 1.46],
[58, 0, "Apolar", 1.88], [59, 1, "Polar", 1.64],
[60, 0, "Apolar", 1.88], [61, 0, "Apolar", 1.61],
[62, 1, "Polar", 1.42], [63, 0, "Apolar", 1.88],
[64, 0, "Apolar", 1.88], [65, 0, "Apolar", 1.88],
[66, 0, "Apolar", 1.88], [67, 1, "Polar", 1.64],
[68, 0, "Apolar", 1.88], [69, 0, "Apolar", 1.61],
[70, 1, "Polar", 1.42], [71, 0, "Apolar", 1.88],
[72, 1, "Polar", 1.46], [73, 0, "Apolar", 1.88],
[74, 1, "Polar", 1.64], [75, 0, "Apolar", 1.88],
[76, 0, "Apolar", 1.61], [77, 1, "Polar", 1.42],
[78, 1, "Polar", 1.64], [79, 0, "Apolar", 1.88],
[80, 0, "Apolar", 1.61], [81, 1, "Polar", 1.42],
[82, 0, "Apolar", 1.88], [83, 0, "Apolar", 1.88],
[84, 0, "Apolar", 1.88], [85, 0, "Apolar", 1.88],
[86, 1, "Polar", 1.64], [87, 1, "Polar", 1.64],
[88, 0, "Apolar", 1.88], [89, 0, "Apolar", 1.61],
[90, 1, "Polar", 1.42], [91, 0, "Apolar", 1.88],
[92, 1, "Polar", 1.46], [93, 0, "Apolar", 1.88],
[94, 1, "Polar", 1.64], [95, 0, "Apolar", 1.88],
[96, 0, "Apolar", 1.61], [97, 1, "Polar", 1.42],
[98, 0, "Apolar", 1.88], [99, 0, "Apolar", 1.88],
[100, 0, "Apolar", 1.88], [101, 0, "Apolar", 1.88],
[102, 1, "Polar", 1.64], [103, 0, "Apolar", 1.88],
[104, 0, "Apolar", 1.61], [105, 1, "Polar", 1.42],
[106, 0, "Apolar", 1.88], [107, 1, "Polar", 1.46],
[108, 0, "Apolar", 1.88], [109, 1, "Polar", 1.64],
[110, 0, "Apolar", 1.88], [111, 0, "Apolar", 1.61],
[112, 1, "Polar", 1.42], [113, 0, "Apolar", 1.88],
[114, 0, "Apolar", 1.88], [115, 0, "Apolar", 1.88],
[116, 0, "Apolar", 1.88], [117, 1, "Polar", 1.64],
[118, 0, "Apolar", 1.88], [119, 0, "Apolar", 1.61],
[120, 1, "Polar", 1.42], [121, 0, "Apolar", 1.88],
[122, 0, "Apolar", 1.88], [123, 0, "Apolar", 1.61],
[124, 1, "Polar", 1.42], [125, 1, "Polar", 1.46],
[126, 1, "Polar", 1.64], [127, 0, "Apolar", 1.88],
[128, 0, "Apolar", 1.61], [129, 1, "Polar", 1.42],
[130, 0, "Apolar", 1.88], [131, 0, "Apolar", 1.88],
[132, 0, "Apolar", 1.88], [133, 1, "Polar", 1.64],
[134, 0, "Apolar", 1.88], [135, 0, "Apolar", 1.61],
[136, 1, "Polar", 1.42], [137, 0, "Apolar", 1.88],
[138, 0, "Apolar", 1.88], [139, 0, "Apolar", 1.61],
[140, 1, "Polar", 1.42], [141, 1, "Polar", 1.46],
[142, 1, "Polar", 1.64], [143, 0, "Apolar", 1.88],
[144, 0, "Apolar", 1.61], [145, 1, "Polar", 1.42],
[146, 0, "Apolar", 1.88], [147, 0, "Apolar", 1.88],
[148, 0, "Apolar", 1.88], [149, 1, "Polar", 1.64],
[150, 0, "Apolar", 1.88], [151, 0, "Apolar", 1.61],
[152, 1, "Polar", 1.42], [153, 0, "Apolar", 1.88],
[154, 1, "Polar", 1.46], [155, 1, "Polar", 1.64],
[156, 0, "Apolar", 1.88], [157, 0, "Apolar", 1.61],
[158, 1, "Polar", 1.42], [159, 0, "Apolar", 1.88],
[160, 0, "Apolar", 1.61], [161, 1, "Polar", 1.42],
[162, 1, "Polar", 1.46], [163, 1, "Polar", 1.64],
[164, 0, "Apolar", 1.88], [165, 0, "Apolar", 1.61],
[166, 1, "Polar", 1.42], [167, 0, "Apolar", 1.88],
[168, 1, "Polar", 1.46], [169, 0, "Apolar", 1.88],
[170, 1, "Polar", 1.64], [171, 0, "Apolar", 1.88],
[172, 0, "Apolar", 1.61], [173, 1, "Polar", 1.42],
[174, 0, "Apolar", 1.88], [175, 0, "Apolar", 1.88],
[176, 0, "Apolar", 1.88], [177, 0, "Apolar", 1.88],
[178, 1, "Polar", 1.64], [179, 0, "Apolar", 1.88],
[180, 0, "Apolar", 1.61], [181, 1, "Polar", 1.42],
[182, 0, "Apolar", 1.88], [183, 0, "Apolar", 1.88],
[184, 0, "Apolar", 1.61], [185, 1, "Polar", 1.42],
[186, 1, "Polar", 1.46], [187, 1, "Polar", 1.64],
[188, 0, "Apolar", 1.88], [189, 0, "Apolar", 1.61],
[190, 1, "Polar", 1.42], [191, 0, "Apolar", 1.88],
[192, 0, "Apolar", 1.61], [193, 1, "Polar", 1.42],
[194, 1, "Polar", 1.64], [195, 1, "Polar", 1.64],
[196, 0, "Apolar", 1.88], [197, 0, "Apolar", 1.61],
[198, 1, "Polar", 1.42], [199, 0, "Apolar", 1.88],
[200, 0, "Apolar", 1.88], [201, 0, "Apolar", 1.88],
[202, 1, "Polar", 1.64], [203, 0, "Apolar", 1.88],
[204, 0, "Apolar", 1.61], [205, 1, "Polar", 1.42],
[206, 0, "Apolar", 1.88], [207, 0, "Apolar", 1.88],
[208, 0, "Apolar", 1.88], [209, 0, "Apolar", 1.88],
[210, 1, "Polar", 1.64], [211, 1, "Polar", 1.64],
[212, 0, "Apolar", 1.88], [213, 0, "Apolar", 1.61],
[214, 1, "Polar", 1.42], [215, 0, "Apolar", 1.88],
[216, 1, "Polar", 1.64], [217, 0, "Apolar", 1.88],
[218, 0, "Apolar", 1.61], [219, 1, "Polar", 1.42],
[220, 0, "Apolar", 1.88], [221, 0, "Apolar", 1.88],
[222, 0, "Apolar", 1.88], [223, 0, "Apolar", 1.88],
[224, 1, "Polar", 1.64], [225, 1, "Polar", 1.64],
[226, 0, "Apolar", 1.88], [227, 0, "Apolar", 1.61],
[228, 1, "Polar", 1.42], [229, 0, "Apolar", 1.88],
[230, 0, "Apolar", 1.88], [231, 0, "Apolar", 1.88],
[232, 0, "Apolar", 1.88], [233, 1, "Polar", 1.64],
[234, 0, "Apolar", 1.88], [235, 0, "Apolar", 1.61],
[236, 1, "Polar", 1.42], [237, 0, "Apolar", 1.88],
[238, 0, "Apolar", 1.88], [239, 0, "Apolar", 1.61],
[240, 1, "Polar", 1.42], [241, 1, "Polar", 1.64],
[242, 1, "Polar", 1.64], [243, 0, "Apolar", 1.88],
[244, 0, "Apolar", 1.61], [245, 1, "Polar", 1.42],
[246, 0, "Apolar", 1.88], [247, 0, "Apolar", 1.61],
[248, 1, "Polar", 1.42], [249, 1, "Polar", 1.46],
[250, 1, "Polar", 1.64], [251, 0, "Apolar", 1.88],
[252, 0, "Apolar", 1.61], [253, 1, "Polar", 1.42],
[254, 0, "Apolar", 1.88], [255, 0, "Apolar", 1.88],
[256, 0, "Apolar", 1.88], [257, 0, "Apolar", 1.88],
[258, 1, "Polar", 1.64], [259, 1, "Polar", 1.64],
[260, 0, "Apolar", 1.88], [261, 0, "Apolar", 1.61],
[262, 1, "Polar", 1.42], [263, 0, "Apolar", 1.88],
[264, 0, "Apolar", 1.88], [265, 0, "Apolar", 1.61],
[266, 1, "Polar", 1.42], [267, 1, "Polar", 1.46],
[268, 1, "Polar", 1.64], [269, 0, "Apolar", 1.88],
[270, 0, "Apolar", 1.61], [271, 1, "Polar", 1.42],
[272, 1, "Polar", 1.64], [273, 0, "Apolar", 1.88],
[274, 0, "Apolar", 1.61], [275, 1, "Polar", 1.42],
[276, 0, "Apolar", 1.88], [277, 0, "Apolar", 1.88],
[278, 0, "Apolar", 1.88], [279, 0, "Apolar", 1.88],
[280, 1, "Polar", 1.64], [281, 0, "Apolar", 1.88],
[282, 0, "Apolar", 1.61], [283, 1, "Polar", 1.42],
[284, 0, "Apolar", 1.88], [285, 0, "Apolar", 1.88],
[286, 0, "Apolar", 1.88], [287, 1, "Polar", 1.64],
[288, 0, "Apolar", 1.88], [289, 0, "Apolar", 1.61],
[290, 1, "Polar", 1.42], [291, 0, "Apolar", 1.88],
[292, 0, "Apolar", 1.88], [293, 0, "Apolar", 1.88],
[294, 1, "Polar", 1.64], [295, 0, "Apolar", 1.88],
[296, 0, "Apolar", 1.61], [297, 1, "Polar", 1.42],
[298, 0, "Apolar", 1.88], [299, 0, "Apolar", 1.61],
[300, 1, "Polar", 1.42], [301, 1, "Polar", 1.46],
[302, 1, "Polar", 1.64], [303, 0, "Apolar", 1.88],
[304, 0, "Apolar", 1.61], [305, 1, "Polar", 1.42],
[306, 0, "Apolar", 1.88], [307, 0, "Apolar", 1.88],
[308, 0, "Apolar", 1.61], [309, 1, "Polar", 1.42],
[310, 1, "Polar", 1.64], [311, 1, "Polar", 1.64],
[312, 0, "Apolar", 1.88], [313, 0, "Apolar", 1.61],
[314, 1, "Polar", 1.42], [315, 0, "Apolar", 1.88],
[316, 0, "Apolar", 1.88], [317, 0, "Apolar", 1.61],
[318, 1, "Polar", 1.42], [319, 1, "Polar", 1.64],
[320, 1, "Polar", 1.64], [321, 0, "Apolar", 1.88],
[322, 0, "Apolar", 1.61], [323, 1, "Polar", 1.42],
[324, 0, "Apolar", 1.88], [325, 0, "Apolar", 1.88],
[326, 0, "Apolar", 1.88], [327, 1, "Polar", 1.64],
[328, 0, "Apolar", 1.61], [329, 1, "Polar", 1.64],
[330, 1, "Polar", 1.64], [331, 1, "Polar", 1.64],
[332, 0, "Apolar", 1.88], [333, 0, "Apolar", 1.61],
[334, 1, "Polar", 1.42], [335, 0, "Apolar", 1.88],
[336, 0, "Apolar", 1.88], [337, 0, "Apolar", 1.88],
[338, 0, "Apolar", 1.88], [339, 1, "Polar", 1.64],
[340, 0, "Apolar", 1.88], [341, 0, "Apolar", 1.61],
[342, 1, "Polar", 1.42], [343, 0, "Apolar", 1.88],
[344, 0, "Apolar", 1.88], [345, 0, "Apolar", 1.88],
[346, 0, "Apolar", 1.88], [347, 1, "Polar", 1.64],
[348, 0, "Apolar", 1.88], [349, 0, "Apolar", 1.61],
[350, 1, "Polar", 1.42], [351, 0, "Apolar", 1.88],
[352, 0, "Apolar", 1.61], [353, 0, "Apolar", 1.76],
[354, 0, "Apolar", 1.76], [355, 0, "Apolar", 1.76],
[356, 0, "Apolar", 1.76], [357, 0, "Apolar", 1.76],
[358, 1, "Polar", 1.64], [359, 0, "Apolar", 1.88],
[360, 0, "Apolar", 1.61], [361, 1, "Polar", 1.42],
[362, 0, "Apolar", 1.88], [363, 1, "Polar", 1.64],
[364, 0, "Apolar", 1.88], [365, 0, "Apolar", 1.61],
[366, 1, "Polar", 1.42], [367, 1, "Polar", 1.64],
[368, 0, "Apolar", 1.88], [369, 0, "Apolar", 1.61],
[370, 1, "Polar", 1.42], [371, 0, "Apolar", 1.88],
[372, 0, "Apolar", 1.88], [373, 0, "Apolar", 1.88],
[374, 0, "Apolar", 1.88], [375, 1, "Polar", 1.64],
[376, 1, "Polar", 1.64], [377, 0, "Apolar", 1.88],
[378, 0, "Apolar", 1.61], [379, 1, "Polar", 1.42],
[380, 0, "Apolar", 1.88], [381, 0, "Apolar", 1.88],
[382, 0, "Apolar", 1.61], [383, 1, "Polar", 1.42],
[384, 1, "Polar", 1.64], [385, 1, "Polar", 1.64],
[386, 0, "Apolar", 1.88], [387, 0, "Apolar", 1.61],
[388, 1, "Polar", 1.42], [389, 0, "Apolar", 1.88],
[390, 0, "Apolar", 1.88], [391, 0, "Apolar", 1.88],
[392, 0, "Apolar", 1.88], [393, 1, "Polar", 1.64],
[394, 0, "Apolar", 1.88], [395, 0, "Apolar", 1.61],
[396, 1, "Polar", 1.42], [397, 0, "Apolar", 1.88],
[398, 0, "Apolar", 1.88], [399, 0, "Apolar", 1.61],
[400, 1, "Polar", 1.42], [401, 1, "Polar", 1.46],
[402, 1, "Polar", 1.64], [403, 0, "Apolar", 1.88],
[404, 0, "Apolar", 1.61], [405, 1, "Polar", 1.42],
[406, 0, "Apolar", 1.88], [407, 0, "Apolar", 1.61],
[408, 1, "Polar", 1.42], [409, 1, "Polar", 1.46],
[410, 1, "Polar", 1.64], [411, 0, "Apolar", 1.88],
[412, 0, "Apolar", 1.61], [413, 1, "Polar", 1.42],
[414, 1, "Polar", 1.64], [415, 0, "Apolar", 1.88],
[416, 0, "Apolar", 1.61], [417, 1, "Polar", 1.42],
[418, 0, "Apolar", 1.88], [419, 0, "Apolar", 1.88],
[420, 0, "Apolar", 1.88], [421, 1, "Polar", 1.64],
[422, 0, "Apolar", 1.61], [423, 1, "Polar", 1.64],
[424, 1, "Polar", 1.64], [425, 1, "Polar", 1.64],
[426, 0, "Apolar", 1.88], [427, 0, "Apolar", 1.61],
[428, 1, "Polar", 1.42], [429, 0, "Apolar", 1.88],
[430, 1, "Polar", 1.46], [431, 0, "Apolar", 1.88],
[432, 1, "Polar", 1.64], [433, 0, "Apolar", 1.88],
[434, 0, "Apolar", 1.61], [435, 1, "Polar", 1.42],
[436, 0, "Apolar", 1.88], [437, 0, "Apolar", 1.88],
[438, 0, "Apolar", 1.88], [439, 0, "Apolar", 1.88],
[440, 1, "Polar", 1.64], [441, 0, "Apolar", 1.88],
[442, 0, "Apolar", 1.61], [443, 1, "Polar", 1.42],
[444, 0, "Apolar", 1.88], [445, 1, "Polar", 1.46],
[446, 1, "Polar", 1.64], [447, 0, "Apolar", 1.88],
[448, 0, "Apolar", 1.61], [449, 1, "Polar", 1.42],
[450, 0, "Apolar", 1.88], [451, 0, "Apolar", 1.61],
[452, 1, "Polar", 1.42], [453, 1, "Polar", 1.46],
[454, 1, "Polar", 1.64], [455, 0, "Apolar", 1.88],
[456, 0, "Apolar", 1.61], [457, 1, "Polar", 1.42],
[458, 0, "Apolar", 1.88], [459, 0, "Apolar", 1.61],
[460, 0, "Apolar", 1.76], [461, 0, "Apolar", 1.76],
[462, 0, "Apolar", 1.76], [463, 0, "Apolar", 1.76],
[464, 0, "Apolar", 1.61], [465, 1, "Polar", 1.46],
[466, 1, "Polar", 1.64], [467, 0, "Apolar", 1.88],
[468, 0, "Apolar", 1.61], [469, 1, "Polar", 1.42],
[470, 0, "Apolar", 1.88], [471, 0, "Apolar", 1.61],
[472, 1, "Polar", 1.42], [473, 1, "Polar", 1.64],
[474, 1, "Polar", 1.64], [475, 0, "Apolar", 1.88],
[476, 0, "Apolar", 1.61], [477, 1, "Polar", 1.42],
[478, 0, "Apolar", 1.88], [479, 0, "Apolar", 1.88],
[480, 0, "Apolar", 1.88], [481, 0, "Apolar", 1.88],
[482, 1, "Polar", 1.64], [483, 0, "Apolar", 1.88],
[484, 0, "Apolar", 1.61], [485, 1, "Polar", 1.42],
[486, 0, "Apolar", 1.88], [487, 0, "Apolar", 1.88],
[488, 0, "Apolar", 1.61], [489, 1, "Polar", 1.42],
[490, 1, "Polar", 1.64], [491, 1, "Polar", 1.64],
[492, 0, "Apolar", 1.88], [493, 0, "Apolar", 1.61],
[494, 1, "Polar", 1.42], [495, 0, "Apolar", 1.88],
[496, 0, "Apolar", 1.88], [497, 0, "Apolar", 1.88],
[498, 0, "Apolar", 1.88], [499, 1, "Polar", 1.64],
[500, 1, "Polar", 1.64], [501, 0, "Apolar", 1.88],
[502, 0, "Apolar", 1.61], [503, 1, "Polar", 1.42],
[504, 0, "Apolar", 1.88], [505, 0, "Apolar", 1.88],
[506, 0, "Apolar", 1.61], [507, 1, "Polar", 1.42],
[508, 1, "Polar", 1.46], [509, 1, "Polar", 1.64],
[510, 0, "Apolar", 1.88], [511, 0, "Apolar", 1.61],
[512, 1, "Polar", 1.42], [513, 0, "Apolar", 1.88],
[514, 1, "Polar", 1.46], [515, 1, "Polar", 1.64],
[516, 0, "Apolar", 1.88], [517, 0, "Apolar", 1.61],
[518, 1, "Polar", 1.42], [519, 0, "Apolar", 1.88],
[520, 1, "Polar", 1.46], [521, 0, "Apolar", 1.88],
[522, 1, "Polar", 1.64], [523, 0, "Apolar", 1.88],
[524, 0, "Apolar", 1.61], [525, 1, "Polar", 1.42],
[526, 0, "Apolar", 1.88], [527, 0, "Apolar", 1.88],
[528, 0, "Apolar", 1.88], [529, 0, "Apolar", 1.88],
[530, 1, "Polar", 1.64], [531, 0, "Apolar", 1.88],
[532, 0, "Apolar", 1.61], [533, 1, "Polar", 1.42],
[534, 0, "Apolar", 1.88], [535, 0, "Apolar", 1.61],
[536, 1, "Polar", 1.64], [537, 0, "Apolar", 1.76],
[538, 0, "Apolar", 1.76], [539, 1, "Polar", 1.64],
[540, 1, "Polar", 1.64], [541, 0, "Apolar", 1.88],
[542, 0, "Apolar", 1.61], [543, 1, "Polar", 1.42],
[544, 0, "Apolar", 1.88], [545, 0, "Apolar", 1.88],
[546, 0, "Apolar", 1.88], [547, 0, "Apolar", 1.88],
[548, 1, "Polar", 1.64], [549, 0, "Apolar", 1.88],
[550, 0, "Apolar", 1.61], [551, 1, "Polar", 1.42],
[552, 0, "Apolar", 1.88], [553, 0, "Apolar", 1.88],
[554, 0, "Apolar", 1.88], [555, 1, "Polar", 1.64],
[556, 0, "Apolar", 1.88], [557, 0, "Apolar", 1.61],
[558, 1, "Polar", 1.42], [559, 0, "Apolar", 1.88],
[560, 0, "Apolar", 1.88], [561, 0, "Apolar", 1.88],
[562, 0, "Apolar", 1.88], [563, 1, "Polar", 1.64],
[564, 0, "Apolar", 1.88], [565, 0, "Apolar", 1.61],
[566, 1, "Polar", 1.42], [567, 0, "Apolar", 1.88],
[568, 0, "Apolar", 1.88], [569, 0, "Apolar", 1.88],
[570, 1, "Polar", 1.64], [571, 0, "Apolar", 1.61],
[572, 1, "Polar", 1.64], [573, 1, "Polar", 1.64],
[574, 1, "Polar", 1.64], [575, 0, "Apolar", 1.88],
[576, 0, "Apolar", 1.61], [577, 1, "Polar", 1.42],
[578, 0, "Apolar", 1.88], [579, 0, "Apolar", 1.88],
[580, 0, "Apolar", 1.88], [581, 0, "Apolar", 1.88],
[582, 1, "Polar", 1.64], [583, 0, "Apolar", 1.88],
[584, 0, "Apolar", 1.61], [585, 1, "Polar", 1.42],
[586, 0, "Apolar", 1.88], [587, 0, "Apolar", 1.88],
[588, 0, "Apolar", 1.88], [589, 1, "Polar", 1.64],
[590, 0, "Apolar", 1.61], [591, 1, "Polar", 1.64],
[592, 1, "Polar", 1.64], [593, 1, "Polar", 1.64],
[594, 0, "Apolar", 1.88], [595, 0, "Apolar", 1.61],
[596, 1, "Polar", 1.42], [597, 1, "Polar", 1.64],
[598, 0, "Apolar", 1.88], [599, 0, "Apolar", 1.61],
[600, 1, "Polar", 1.42], [601, 1, "Polar", 1.46]
]
class TestCase(unittest.TestCase):
def test_basics(self):
fname = os.path.join(os.environ["RDBASE"],
"External", "FreeSASA", "test_data", "1d3z.pdb")
mol = Chem.MolFromPDBFile(fname)
radii = rdFreeSASA.classifyAtoms(mol)
for atom in mol.GetAtoms():
self.assertEqual( expected[atom.GetIdx()][3], radii[atom.GetIdx()] )
leeRichards = 5004.79964427
shrakerupley = 5000.340175
sasa = rdFreeSASA.CalcSASA(mol, radii=radii)
self.assertTrue( abs(sasa-leeRichards) < 1e-5 )
opts = rdFreeSASA.SASAOpts(rdFreeSASA.ShrakeRupley, rdFreeSASA.Protor)
sasa = rdFreeSASA.CalcSASA(mol, radii=radii, opts=opts)
self.assertTrue( abs(sasa-shrakerupley) < 1e-5 )
apolar = rdFreeSASA.CalcSASA(mol, radii, query=rdFreeSASA.MakeFreeSasaAPolarAtomQuery(), opts=opts)
polar = rdFreeSASA.CalcSASA(mol, radii, query=rdFreeSASA.MakeFreeSasaPolarAtomQuery(), opts=opts)
self.assertTrue( abs(polar + apolar - shrakerupley) < 1e-5 )
def test_opts(self):
fname = os.path.join(os.environ["RDBASE"],
"External", "FreeSASA", "test_data", "1d3z.pdb")
mol = Chem.MolFromPDBFile(fname)
radii = rdFreeSASA.classifyAtoms(mol)
for atom in mol.GetAtoms():
self.assertEqual( expected[atom.GetIdx()][3], radii[atom.GetIdx()] )
leeRichards = 5004.79964427
shrakerupley = 5000.340175
opts = rdFreeSASA.SASAOpts()
for alg, res in ( (rdFreeSASA.ShrakeRupley, shrakerupley),
(rdFreeSASA.LeeRichards, leeRichards)):
opts.algorithm = alg
sasa = rdFreeSASA.CalcSASA(mol, radii=radii, opts=opts)
self.assertTrue( abs(sasa-res) < 1e-5 )
leeRichards = 5009.93014166
shrakerupley = 4977.7709106
opts = rdFreeSASA.SASAOpts()
opts.probeRadius = 2.0
for alg, res in ( (rdFreeSASA.ShrakeRupley, shrakerupley),
(rdFreeSASA.LeeRichards, leeRichards)):
opts.algorithm = alg
sasa = rdFreeSASA.CalcSASA(mol, radii=radii, opts=opts)
self.assertTrue( abs(sasa-res) < 1e-5 )
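# Beyond these regression tests, a minimal standalone computation would look
# like this (sketch; "some.pdb" is a placeholder path):
#
#     mol = Chem.MolFromPDBFile("some.pdb")
#     radii = rdFreeSASA.classifyAtoms(mol)
#     sasa = rdFreeSASA.CalcSASA(mol, radii=radii)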
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit
|
External/FreeSASA/Wrap/testFreeSASA.py
|
Python
|
bsd-3-clause
| 20,739
|
[
"RDKit"
] |
5647f5fb063d5f0411e5e114c2ba244e36ed8fdc594fa06accf0893a0cdcbd73
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to access resources at ExPASy over the WWW.
See http://www.expasy.ch/
Functions:
- get_prodoc_entry Interface to the get-prodoc-entry CGI script.
- get_prosite_entry Interface to the get-prosite-entry CGI script.
- get_prosite_raw Interface to the get-prosite-raw CGI script.
- get_sprot_raw Interface to the get-sprot-raw CGI script.
- sprot_search_ful Interface to the sprot-search-ful CGI script.
- sprot_search_de Interface to the sprot-search-de CGI script.
"""
import sys
# Make the bundled Bio package importable when run from this tree
sys.path.append('../..')
# Importing these functions with leading underscore as not intended for reuse
from Bio._py3k import urlopen as _urlopen
from Bio._py3k import urlencode as _urlencode
__docformat__ = "restructuredtext en"
def get_prodoc_entry(id, cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry'):
"""get_prodoc_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry') -> handle
Get a handle to a PRODOC entry at ExPASy in HTML format.
For a non-existing key XXX, ExPASy returns an HTML-formatted page
containing this line:
'There is no PROSITE documentation entry XXX. Please try again.'
"""
# Open a handle to ExPASy.
return _urlopen("%s?%s" % (cgi, id))
def get_prosite_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry'):
"""get_prosite_entry(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry') -> handle
Get a handle to a PROSITE entry at ExPASy in HTML format.
For a non-existing key XXX, ExPASy returns an HTML-formatted page
containing this line:
'There is currently no PROSITE entry for XXX. Please try again.'
"""
return _urlopen("%s?%s" % (cgi, id))
def get_prosite_raw(id, cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl'):
"""get_prosite_raw(id,
cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl')
-> handle
Get a handle to a raw PROSITE or PRODOC entry at ExPASy.
For a non-existing key, ExPASy returns nothing.
"""
return _urlopen("%s?%s" % (cgi, id))
def get_sprot_raw(id):
"""Get a handle to a raw SwissProt entry at ExPASy.
For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt
(as per the http://www.expasy.ch/expasy_urls.html documentation).
"""
return _urlopen("http://www.uniprot.org/uniprot/%s.txt" % id)
def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful'):
"""sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful') -> handle
Search SwissProt by full text.
"""
variables = {'SEARCH': text}
if make_wild:
variables['makeWild'] = 'on'
if swissprot:
variables['S'] = 'on'
if trembl:
variables['T'] = 'on'
options = _urlencode(variables)
fullcgi = "%s?%s" % (cgi, options)
handle = _urlopen(fullcgi)
return handle
def sprot_search_de(text, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-de'):
"""sprot_search_de(text, swissprot=1, trembl=None,
cgi='http://www.expasy.ch/cgi-bin/sprot-search-de') -> handle
Search SwissProt by name, description, gene name, species, or
organelle.
"""
variables = {'SEARCH': text}
if swissprot:
variables['S'] = 'on'
if trembl:
variables['T'] = 'on'
options = _urlencode(variables)
fullcgi = "%s?%s" % (cgi, options)
handle = _urlopen(fullcgi)
return handle
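# Hedged usage sketch ('P12345' is an arbitrary example accession):
#
#     handle = get_sprot_raw('P12345')
#     text = handle.read()
#     handle.close()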
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/ExPASy/__init__.py
|
Python
|
gpl-2.0
| 3,830
|
[
"Biopython"
] |
c631027ab04e1f7f8b480c9c0f353e407948f3b373729bb39d3580d3ec9e60f0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
r"""Reader for files produced with the cf netcdf writer in satpy.
Introduction
------------
The ``satpy_cf_nc`` reader reads data written by the satpy cf_writer. Filenames for cf_writer are optional.
There are several readers using the same satpy_cf_nc.py reader.
* Generic reader ``satpy_cf_nc``
* EUMETSAT GAC FDR reader ``avhrr_l1c_eum_gac_fdr_nc``
Generic reader
--------------
The generic ``satpy_cf_nc`` reader reads files of type:
.. code-block:: none
'{platform_name}-{sensor}-{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.nc'
Example
-------
Here is an example how to read the data in satpy:
.. code-block:: python
from satpy import Scene
filenames = ['data/npp-viirs-mband-20201007075915-20201007080744.nc']
scn = Scene(reader='satpy_cf_nc', filenames=filenames)
scn.load(['M05'])
scn['M05']
Output:
.. code-block:: none
<xarray.DataArray 'M05' (y: 4592, x: 3200)>
dask.array<open_dataset-d91cfbf1bf4f14710d27446d91cdc6e4M05, shape=(4592, 3200),
dtype=float32, chunksize=(4096, 3200), chunktype=numpy.ndarray>
Coordinates:
longitude (y, x) float32 dask.array<chunksize=(4096, 3200), meta=np.ndarray>
latitude (y, x) float32 dask.array<chunksize=(4096, 3200), meta=np.ndarray>
Dimensions without coordinates: y, x
Attributes:
start_time: 2020-10-07 07:59:15
start_orbit: 46350
end_time: 2020-10-07 08:07:44
end_orbit: 46350
calibration: reflectance
long_name: M05
modifiers: ('sunz_corrected',)
platform_name: Suomi-NPP
resolution: 742
sensor: viirs
standard_name: toa_bidirectional_reflectance
units: %
wavelength: 0.672 µm (0.662-0.682 µm)
date_created: 2020-10-07T08:20:02Z
instrument: VIIRS
Notes:
Available datasets and attributes will depend on the data saved with the cf_writer.
EUMETSAT AVHRR GAC FDR L1C reader
---------------------------------
The ``avhrr_l1c_eum_gac_fdr_nc`` reader reads files of type:
.. code-block:: none
'AVHRR-GAC_FDR_1C_{platform}_{start_time:%Y%m%dT%H%M%SZ}_{end_time:%Y%m%dT%H%M%SZ}_{processing_mode}_{disposition_mode}_{creation_time}_{version_int:04d}.nc'
Example
-------
Here is an example how to read the data in satpy:
.. code-block:: python
from satpy import Scene
filenames = ['data/AVHRR-GAC_FDR_1C_N06_19810330T042358Z_19810330T060903Z_R_O_20200101T000000Z_0100.nc']
scn = Scene(reader='avhrr_l1c_eum_gac_fdr_nc', filenames=filenames)
scn.load(['brightness_temperature_channel_4'])
scn['brightness_temperature_channel_4']
Output:
.. code-block:: none
<xarray.DataArray 'brightness_temperature_channel_4' (y: 11, x: 409)>
dask.array<open_dataset-55ffbf3623b32077c67897f4283640a5brightness_temperature_channel_4, shape=(11, 409),
dtype=float32, chunksize=(11, 409), chunktype=numpy.ndarray>
Coordinates:
* x (x) int16 0 1 2 3 4 5 6 7 8 ... 401 402 403 404 405 406 407 408
* y (y) int64 0 1 2 3 4 5 6 7 8 9 10
acq_time (y) datetime64[ns] dask.array<chunksize=(11,), meta=np.ndarray>
longitude (y, x) float64 dask.array<chunksize=(11, 409), meta=np.ndarray>
latitude (y, x) float64 dask.array<chunksize=(11, 409), meta=np.ndarray>
Attributes:
start_time: 1981-03-30 04:23:58
end_time: 1981-03-30 06:09:03
calibration: brightness_temperature
modifiers: ()
resolution: 1050
standard_name: toa_brightness_temperature
units: K
wavelength: 10.8 µm (10.3-11.3 µm)
Conventions: CF-1.8 ACDD-1.3
comment: Developed in cooperation with EUME...
creator_email: ops@eumetsat.int
creator_name: EUMETSAT
creator_url: https://www.eumetsat.int/
date_created: 2020-09-14T10:50:51.073707
disposition_mode: O
gac_filename: NSS.GHRR.NA.D81089.S0423.E0609.B09...
geospatial_lat_max: 89.95386902434623
geospatial_lat_min: -89.97581969005503
geospatial_lat_resolution: 1050 meters
geospatial_lat_units: degrees_north
geospatial_lon_max: 179.99952992568998
geospatial_lon_min: -180.0
geospatial_lon_resolution: 1050 meters
geospatial_lon_units: degrees_east
ground_station: GC
id: DOI:10.5676/EUM/AVHRR_GAC_L1C_FDR/...
institution: EUMETSAT
instrument: Earth Remote Sensing Instruments >...
keywords: ATMOSPHERE > ATMOSPHERIC RADIATION...
keywords_vocabulary: GCMD Science Keywords, Version 9.1
licence: EUMETSAT data policy https://www.e...
naming_authority: int.eumetsat
orbit_number_end: 9123
orbit_number_start: 9122
orbital_parameters_tle: ['1 11416U 79057A 81090.16350942...
platform: Earth Observation Satellites > NOA...
processing_level: 1C
processing_mode: R
product_version: 1.0.0
references: Devasthale, A., M. Raspaud, C. Sch...
source: AVHRR GAC Level 1 Data
standard_name_vocabulary: CF Standard Name Table v73
summary: Fundamental Data Record (FDR) of m...
sun_earth_distance_correction_factor: 0.9975244779999585
time_coverage_end: 19820803T003900Z
time_coverage_start: 19800101T000000Z
title: AVHRR GAC L1C FDR
version_calib_coeffs: PATMOS-x, v2017r1
version_pygac: 1.4.0
version_pygac_fdr: 0.1.dev107+gceb7b26.d20200910
version_satpy: 0.21.1.dev894+g5cf76e6
history: Created by pytroll/satpy on 2020-0...
name: brightness_temperature_channel_4
_satpy_id: DataID(name='brightness_temperatur...
ancillary_variables: []
"""
import itertools
import json
import logging
import xarray as xr
from satpy import CHUNK_SIZE
from satpy.dataset.dataid import WavelengthRange
from satpy.readers.file_handlers import BaseFileHandler
logger = logging.getLogger(__name__)
class SatpyCFFileHandler(BaseFileHandler):
"""File handler for Satpy's CF netCDF files."""
def __init__(self, filename, filename_info, filetype_info, numeric_name_prefix='CHANNEL_'):
"""Initialize file handler."""
super().__init__(filename, filename_info, filetype_info)
self.engine = None
self._numeric_name_prefix = numeric_name_prefix
@property
def start_time(self):
"""Get start time."""
return self.filename_info['start_time']
@property
def end_time(self):
"""Get end time."""
return self.filename_info.get('end_time', self.start_time)
@property
def sensor(self):
"""Get sensor."""
nc = xr.open_dataset(self.filename, engine=self.engine)
return nc.attrs['instrument'].replace('/', '-').lower()
@property
def sensor_names(self):
"""Get sensor set."""
return {self.sensor}
def available_datasets(self, configured_datasets=None):
"""Add information of available datasets."""
existing = self._existing_datasets(configured_datasets=configured_datasets)
dynamic = self._dynamic_datasets()
coordinates = self._coordinate_datasets()
for dataset_available, dataset_info in itertools.chain(existing, dynamic, coordinates):
yield dataset_available, dataset_info
def _existing_datasets(self, configured_datasets=None):
"""Add information of existing datasets."""
for is_avail, ds_info in (configured_datasets or []):
yield is_avail, ds_info
def fix_modifier_attr(self, ds_info):
"""Fix modifiers attribute."""
# Empty modifiers are read as [], which causes problems later
if 'modifiers' in ds_info and not ds_info['modifiers']:
ds_info['modifiers'] = ()
try:
ds_info['modifiers'] = tuple(ds_info['modifiers'].split(' '))
except (AttributeError, KeyError):
pass
def _assign_ds_info(self, var_name, val):
"""Assign ds_info."""
ds_info = dict(val.attrs)
ds_info['file_type'] = self.filetype_info['file_type']
ds_info['name'] = ds_info['nc_store_name'] = var_name
if 'original_name' in ds_info:
ds_info['name'] = ds_info['original_name']
elif self._numeric_name_prefix and var_name.startswith(self._numeric_name_prefix):
ds_info['name'] = var_name.replace(self._numeric_name_prefix, '')
try:
ds_info['wavelength'] = WavelengthRange.from_cf(ds_info['wavelength'])
except KeyError:
pass
return ds_info
def _dynamic_datasets(self):
"""Add information of dynamic datasets."""
nc = xr.open_dataset(self.filename, engine=self.engine)
# get dynamic variables known to this file (that we created)
for var_name, val in nc.data_vars.items():
ds_info = self._assign_ds_info(var_name, val)
self.fix_modifier_attr(ds_info)
yield True, ds_info
def _coordinate_datasets(self, configured_datasets=None):
"""Add information of coordinate datasets."""
nc = xr.open_dataset(self.filename, engine=self.engine)
for var_name, val in nc.coords.items():
ds_info = dict(val.attrs)
ds_info['file_type'] = self.filetype_info['file_type']
ds_info['name'] = var_name
self.fix_modifier_attr(ds_info)
yield True, ds_info
def get_dataset(self, ds_id, ds_info):
"""Get dataset."""
logger.debug("Getting data for: %s", ds_id['name'])
nc = xr.open_dataset(self.filename, engine=self.engine,
chunks={'y': CHUNK_SIZE, 'x': CHUNK_SIZE})
name = ds_info.get('nc_store_name', ds_id['name'])
file_key = ds_info.get('file_key', name)
data = nc[file_key]
if name != ds_id['name']:
data = data.rename(ds_id['name'])
data.attrs.update(nc.attrs) # For now add global attributes to all datasets
if "orbital_parameters" in data.attrs:
data.attrs["orbital_parameters"] = _str2dict(data.attrs["orbital_parameters"])
return data
def _str2dict(val):
"""Convert string to dictionary."""
if isinstance(val, str):
val = json.loads(val)
return val
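# Example of the helper above: a JSON-encoded attribute string becomes a
# plain dict, while anything that is not a string passes through unchanged:
#
#     _str2dict('{"projection_longitude": 0.0}')  # -> {'projection_longitude': 0.0}
#     _str2dict({'already': 'a dict'})            # -> {'already': 'a dict'}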
|
pytroll/satpy
|
satpy/readers/satpy_cf_nc.py
|
Python
|
gpl-3.0
| 12,691
|
[
"NetCDF"
] |
5160b4aebae2f88ed4af2dd5d5ee3370e2842c23a307273cd55c68348c01bac1
|
# -*- coding: utf-8 -*-
u"""
This script is to parse SRW Python scripts and to produce JSON-file with the parsed data.
It's highly dependent on the external Sirepo/SRW libraries and is written to allow parsing of the .py files using
SRW objects.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern import pkjson
from pykern import pkrunpy
from pykern.pkdebug import pkdlog, pkdexc, pkdp
import ast
import inspect
import os
import py.path
import re
import srwl_bl
import sirepo.sim_data
_SIM_DATA, SIM_TYPE, SCHEMA = sirepo.sim_data.template_globals('srw')
class SRWParser(object):
def __init__(self, script, user_filename, arguments, optics_func_name='set_optics'):
m = pkrunpy.run_path_as_module(script)
if arguments:
import shlex
arguments = shlex.split(arguments)
self.var_param = srwl_bl.srwl_uti_parse_options(m.varParam, use_sys_argv=False, args=arguments)
self.replace_mirror_files()
self.replace_image_files()
try:
self.optics = getattr(m, optics_func_name)(self.var_param)
except ValueError as e:
if re.search('could not convert string to float', str(e.args)):
self.replace_mirror_files('mirror_2d.dat')
self.optics = getattr(m, optics_func_name)(self.var_param)
self.data = _parsed_dict(self.var_param, self.optics)
self.data.models.simulation.name = _name(user_filename)
def replace_mirror_files(self, mirror_file='mirror_1d.dat'):
for key in self.var_param.__dict__.keys():
if key == 'fdir':
self.var_param.__dict__[key] = os.getcwd()
if re.search(r'\_ofn$', key):
self.var_param.__dict__[key] = ''
if re.search(r'\_(h|i)fn$', key):
if getattr(self.var_param, key) != '' and getattr(self.var_param, key) != 'None':
self.var_param.__dict__[key] = str(_SIM_DATA.lib_file_abspath(mirror_file))
def replace_image_files(self, image_file='sample.tif'):
for key in self.var_param.__dict__.keys():
if key.find('op_sample') >= 0:
if getattr(self.var_param, key) != '':
self.var_param.__dict__[key] = str(_SIM_DATA.lib_file_abspath(image_file))
class Struct(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def import_python(code, tmp_dir, user_filename=None, arguments=None):
"""Converts script_text into json and stores as new simulation.
Avoids too much data back to the user in the event of an error.
This could be a potential security issue, because the script
could be used to probe the system.
Args:
simulation_type (str): always "srw", but used to find lib dir
code (str): Python code that runs SRW
user_filename (str): uploaded file name for log
arguments (str): argv to be passed to script
Returns:
dict: simulation data
"""
script = None
# Patch for the mirror profile for the exported .py file from Sirepo:
code = _patch_mirror_profile(code)
try:
with pkio.save_chdir(tmp_dir):
# This string won't show up anywhere
script = pkio.write_text(
'in.py',
re.sub(r'^main\(', '#', code, flags=re.MULTILINE),
)
o = SRWParser(
script,
user_filename=user_filename,
arguments=arguments,
)
return o.data
except Exception as e:
lineno = script and _find_line_in_trace(script)
if hasattr(e, 'args'):
if len(e.args) == 1:
m = str(e.args[0])
elif e.args:
m = str(e.args)
else:
m = e.__class__.__name__
else:
m = str(e)
pkdlog(
'Error: {}; exception={}; script={}; filename={}; stack:\n{}',
m,
e.__class__.__name__,
script,
user_filename,
pkdexc(),
)
m = m[:50]
raise ValueError(
'Error on line {}: {}'.format(lineno, m) if lineno
else 'Error: {}'.format(m),
)
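# Illustrative call (``code`` holds the text of an exported SRW script and
# ``tmp_dir`` is any writable scratch directory; both names are placeholders):
#
#     sim_data = import_python(code, tmp_dir='/tmp/srw-import',
#                              user_filename='exported_srw.py')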
# Mapping all the values to a dictionary:
def _beamline_element(obj, idx, title, elem_type, position):
data = pkcollections.Dict()
data['id'] = idx
data['type'] = elem_type
data['title'] = title
data['position'] = position
if elem_type in ['aperture', 'obstacle']:
data['shape'] = obj.shape
data['horizontalOffset'] = obj.x
data['verticalOffset'] = obj.y
data['horizontalSize'] = obj.Dx * 1e3
data['verticalSize'] = obj.Dy * 1e3
elif elem_type == 'crl':
keys = ['attenuationLength', 'focalPlane', 'horizontalApertureSize', 'numberOfLenses', 'radius',
'refractiveIndex', 'shape', 'verticalApertureSize', 'wallThickness']
for key in keys:
data[key] = obj.input_parms[key]
# Should be multiplied by 1000.0:
for key in ['horizontalApertureSize', 'verticalApertureSize']:
data[key] *= 1000.0
elif elem_type == 'crystal':
# Fixed values in srw.js:
data['heightAmplification'] = 1
data['heightProfileFile'] = None
data['orientation'] = 'x'
data['material'] = 'Unknown'
data['h'] = '1'
data['k'] = '1'
data['l'] = '1'
try:
data['energy'] = obj.aux_energy
except Exception:
data['energy'] = None
try:
data['grazingAngle'] = obj.aux_ang_dif_pl
except Exception:
data['grazingAngle'] = 0.0
data['asymmetryAngle'] = obj.angAs
data['rotationAngle'] = 0.0
data['crystalThickness'] = obj.tc
data['geometryType'] = obj.uc
data['dSpacing'] = obj.dSp
data['psi0r'] = obj.psi0r
data['psi0i'] = obj.psi0i
data['psiHr'] = obj.psiHr
data['psiHi'] = obj.psiHi
data['psiHBr'] = obj.psiHbr
data['psiHBi'] = obj.psiHbi
data['nvx'] = obj.nvx
data['nvy'] = obj.nvy
data['nvz'] = obj.nvz
data['tvx'] = obj.tvx
data['tvy'] = obj.tvy
elif elem_type == 'ellipsoidMirror':
# Fixed values in srw.js:
data['heightAmplification'] = 1
data['heightProfileFile'] = None
data['orientation'] = 'x'
data['firstFocusLength'] = obj.p
data['focalLength'] = obj.q
data['grazingAngle'] = obj.angGraz * 1e3
data['normalVectorX'] = obj.nvx
data['normalVectorY'] = obj.nvy
data['normalVectorZ'] = obj.nvz
data['sagittalSize'] = obj.ds
data['tangentialSize'] = obj.dt
data['tangentialVectorX'] = obj.tvx
data['tangentialVectorY'] = obj.tvy
elif elem_type == 'fiber':
data['method'] = 'server'
data['externalMaterial'] = 'User-defined'
data['coreMaterial'] = 'User-defined'
keys = ['focalPlane', 'externalRefractiveIndex', 'coreRefractiveIndex', 'externalAttenuationLength',
'coreAttenuationLength', 'externalDiameter', 'coreDiameter', 'horizontalCenterPosition',
'verticalCenterPosition']
for key in keys:
data[key] = obj.input_parms[key]
elif elem_type == 'grating':
# Fixed values in srw.js:
data['grazingAngle'] = 12.9555790185373
data['diffractionOrder'] = obj.m
data['grooveDensity0'] = obj.grDen
data['grooveDensity1'] = obj.grDen1
data['grooveDensity2'] = obj.grDen2
data['grooveDensity3'] = obj.grDen3
data['grooveDensity4'] = obj.grDen4
data['normalVectorX'] = obj.mirSub.nvx
data['normalVectorY'] = obj.mirSub.nvy
data['normalVectorZ'] = obj.mirSub.nvz
data['sagittalSize'] = obj.mirSub.ds
data['tangentialSize'] = obj.mirSub.dt
data['tangentialVectorX'] = obj.mirSub.tvx
data['tangentialVectorY'] = obj.mirSub.tvy
elif elem_type == 'lens':
data['horizontalFocalLength'] = obj.Fx
data['horizontalOffset'] = obj.x
data['verticalFocalLength'] = obj.Fy
data['verticalOffset'] = obj.y
elif elem_type in ['mirror', 'mirror2d']:
keys = ['grazingAngle', 'heightAmplification', 'heightProfileFile', 'horizontalTransverseSize',
'orientation', 'verticalTransverseSize']
for key in keys:
if type(obj.input_parms) == tuple:
data[key] = obj.input_parms[0][key]
else:
data[key] = obj.input_parms[key]
# Should be multiplied by 1000.0:
for key in ['grazingAngle', 'horizontalTransverseSize', 'verticalTransverseSize']:
data[key] *= 1000.0
data['type'] = 'mirror'
data['heightProfileFile'] = 'mirror_1d.dat' if elem_type == 'mirror' else 'mirror_2d.dat'
elif elem_type == 'sample':
data['imageFile'] = 'sample.tif'
data['material'] = 'User-defined'
data['method'] = 'server'
keys = ['resolution', 'thickness', 'refractiveIndex', 'attenuationLength']
for key in keys:
if type(obj.input_parms) == tuple:
data[key] = obj.input_parms[0][key]
else:
data[key] = obj.input_parms[key]
data['resolution'] *= 1e9
data['thickness'] *= 1e6
elif elem_type == 'sphericalMirror':
# Fixed values in srw.js:
data['grazingAngle'] = 13.9626000172
data['heightAmplification'] = 1
data['heightProfileFile'] = None
data['orientation'] = 'x'
data['normalVectorX'] = obj.nvx
data['normalVectorY'] = obj.nvy
data['normalVectorZ'] = obj.nvz
data['radius'] = obj.rad
data['sagittalSize'] = obj.ds
data['tangentialSize'] = obj.dt
data['tangentialVectorX'] = obj.tvx
data['tangentialVectorY'] = obj.tvy
elif elem_type == 'zonePlate':
data['numberOfZones'] = obj.nZones
data['outerRadius'] = obj.rn * 1e3
data['thickness'] = obj.thick * 1e6
data['method'] = 'server'
data['mainMaterial'] = 'User-defined'
data['mainRefractiveIndex'] = obj.delta1
data['mainAttenuationLength'] = obj.atLen1
data['complementaryMaterial'] = 'User-defined'
data['complementaryRefractiveIndex'] = obj.delta2
data['complementaryAttenuationLength'] = obj.atLen2
data['horizontalOffset'] = obj.x
data['verticalOffset'] = obj.y
elif elem_type == 'watch':
pass
else:
raise ValueError('Element type <{}> does not exist.'.format(elem_type))
return data
def _get_beamline(obj_arOpt, init_distance=20.0):
"""The function creates a beamline from the provided object and/or AST tree.
:param obj_arOpt: SRW object containing properties of the beamline elements.
:param init_distance: distance from the source to the first element (20.0 m by default).
:return elements_list: list of all found beamline elements.
"""
num_elements = len(obj_arOpt)
elements_list = []
# The dictionary to count the elements of different types:
names = pkcollections.Dict({
'S': 0,
'O': 0,
'HDM': 0,
'CRL': 0,
'KL': 0,
'KLA': 0,
'AUX': 0,
'M': 0, # mirror
'G': 0, # grating
'ZP': 0, # zone plate
'Crystal': 0,
'Fiber': 0,
'Watch': '',
'Sample': '',
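        # note: 'Watch' and 'Sample' start as '' so their titles carry no number
        # (the "names[key] += 1" below raises TypeError for them, which is silently ignored)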
})
positions = [] # a list of dictionaries with sequence of distances between elements
d_src = init_distance
counter = 0
for i in range(num_elements):
name = obj_arOpt[i].__class__.__name__
try:
next_name = obj_arOpt[i + 1].__class__.__name__
except Exception:
next_name = None
if name == 'SRWLOptD':
d = obj_arOpt[i].L
else:
d = 0.0
d_src += d
if (len(positions) == 0) or \
(name != 'SRWLOptD') or \
(name == 'SRWLOptD' and next_name == 'SRWLOptD') or \
(name == 'SRWLOptD' and (i + 1) == num_elements):
counter += 1
elem_type = ''
if name == 'SRWLOptA':
if obj_arOpt[i].ap_or_ob == 'a':
elem_type = 'aperture'
key = 'S'
else:
elem_type = 'obstacle'
key = 'O'
elif name == 'SRWLOptCryst':
key = 'Crystal'
elem_type = 'crystal'
elif name == 'SRWLOptD':
key = 'AUX'
elem_type = 'watch'
elif name == 'SRWLOptG':
key = 'G'
elem_type = 'grating'
elif name == 'SRWLOptL':
key = 'KL'
elem_type = 'lens'
elif name == 'SRWLOptMirEl':
key = 'M'
elem_type = 'ellipsoidMirror'
elif name == 'SRWLOptMirSph':
key = 'M'
elem_type = 'sphericalMirror'
elif name == 'SRWLOptT':
if type(obj_arOpt[i].input_parms) == tuple:
elem_type = obj_arOpt[i].input_parms[0]['type']
else:
elem_type = obj_arOpt[i].input_parms['type']
if elem_type in ['mirror', 'mirror2d']:
key = 'HDM'
elif elem_type == 'crl': # CRL
key = 'CRL'
elif elem_type == 'cyl_fiber':
elem_type = 'fiber'
key = 'Fiber'
elif elem_type == 'sample':
key = 'Sample'
elif name == 'SRWLOptZP':
key = 'ZP'
elem_type = 'zonePlate'
            # The last drift is treated as a watch point:
if name == 'SRWLOptD' and (i + 1) == num_elements:
key = 'Watch'
elem_type = 'watch'
try:
names[key] += 1
except Exception:
pass
title = key + str(names[key])
if not elem_type:
raise ValueError('Unhandled element named: {}.'.format(name))
positions.append(pkcollections.Dict({
'id': counter,
'object': obj_arOpt[i],
'elem_class': name,
'elem_type': elem_type,
'title': title,
'dist': d,
'dist_source': float(str(d_src)),
}))
for i in range(len(positions)):
data = _beamline_element(
positions[i]['object'],
positions[i]['id'],
positions[i]['title'],
positions[i]['elem_type'],
positions[i]['dist_source']
)
elements_list.append(data)
return elements_list
def _get_default_drift():
"""The function parses srw.js file to find the default values for drift propagation parameters, which can be
sometimes missed in the exported .py files (when distance = 0), but should be presented in .json files.
Returns:
str: default drift propagation paramters
"""
def _search_for_default_drift():
return re.search(
r'function defaultDriftPropagationParams.*?return\s*(\[[^\]]+\])',
pkio.read_text(sirepo.resource.static('js', 'srw.js')),
re.DOTALL,
).group(1)
return pkjson.load_any(_search_for_default_drift())
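# Illustrative sketch only: assuming srw.js contains something like
#     function defaultDriftPropagationParams() {
#         return [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0];
#     }
# (hypothetical values), _search_for_default_drift() captures the bracketed
# list as a string and pkjson.load_any() turns it into a Python list.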
def _get_propagation(op):
prop_dict = pkcollections.Dict()
counter = 0
for i in range(len(op.arProp) - 1):
name = op.arOpt[i].__class__.__name__
try:
next_name = op.arOpt[i + 1].__class__.__name__
except Exception:
next_name = None
if (name != 'SRWLOptD') or \
(name == 'SRWLOptD' and next_name == 'SRWLOptD') or \
((i + 1) == len(op.arProp) - 1): # exclude last drift
counter += 1
prop_dict[str(counter)] = [op.arProp[i]]
if next_name == 'SRWLOptD':
prop_dict[str(counter)].append(op.arProp[i + 1])
else:
prop_dict[str(counter)].append(_get_default_drift())
return prop_dict
def _find_line_in_trace(script):
"""Parse the stack trace for the most recent error message
Returns:
int: first line number in trace that was called from the script
"""
trace = None
t = None
f = None
try:
trace = inspect.trace()
for t in reversed(trace):
f = t[0]
if py.path.local(f.f_code.co_filename) == script:
return f.f_lineno
finally:
del trace
del f
del t
return None
def _list2dict(data_list):
"""
The function converts list of lists to a dictionary with keys from 1st elements and values from 3rd elements.
:param data_list: list of SRW parameters (e.g., 'appParam' in Sirepo's *.py files).
:return out_dict: dictionary with all parameters.
"""
out_dict = pkcollections.Dict()
for i in range(len(data_list)):
out_dict[data_list[i][0]] = data_list[i][2]
return out_dict
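# Illustrative usage with hypothetical 'appParam'-style rows (not from the
# original source), where each row is [name, type, value, description]:
#     _list2dict([['ebm_i', 'f', 0.5, 'beam current [A]'],
#                 ['op_r', 'f', 20.0, 'distance to first element [m]']])
# returns {'ebm_i': 0.5, 'op_r': 20.0}.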
def _name(user_filename):
"""Parse base name from user_filename
Can't assume the file separators will be understood so have to
parse out the name manually.
Will need to be uniquely named by sirepo.server, but not done
yet.
Args:
user_filename (str): Passed in from browser
Returns:
str: suitable name
"""
# crude but good enough for now.
m = re.search(r'([^:/\\]+)\.\w+$', user_filename)
return m.group(1) if m else user_filename
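# Illustrative behavior (hypothetical file names): the regex keeps everything
# after the last ':', '/' or '\' and drops the final extension, e.g.
#     _name('C:\\runs\\my_simulation.py')  -> 'my_simulation'
#     _name('beamline.v2.py')              -> 'beamline.v2'
#     _name('no_extension')                -> 'no_extension'  (no match)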
def _parsed_dict(v, op):
import sirepo.template.srw
std_options = Struct(**_list2dict(srwl_bl.srwl_uti_std_options()))
beamline_elements = _get_beamline(op.arOpt, v.op_r)
# Since the rotation angle cannot be passed from SRW object, we update the angle here:
beamline_elements = _update_crystals(beamline_elements, v)
def _default_value(parm, obj, std, def_val=None):
if not hasattr(obj, parm):
try:
return getattr(std, parm)
except Exception:
if def_val is not None:
return def_val
else:
return ''
try:
return getattr(obj, parm)
except Exception:
if def_val is not None:
return def_val
else:
return ''
    # This dictionary is used for both the initial intensity report and for watch points:
initialIntensityReport = pkcollections.Dict({
'characteristic': v.si_type,
'fieldUnits': 1,
'polarization': v.si_pol,
'precision': v.w_prec,
'sampleFactor': 0,
})
predefined_beams = sirepo.template.srw.get_predefined_beams()
# Default electron beam:
if (hasattr(v, 'source_type') and v.source_type == 'u') \
or (hasattr(v, 'ebm_nm') and (not hasattr(v, 'gbm_pen') or v.gbm_pen == 0)):
source_type = 'u'
if v.ebm_nms == 'Day1':
v.ebm_nms = 'Day 1'
full_beam_name = '{}{}'.format(v.ebm_nm, v.ebm_nms)
if not full_beam_name:
full_beam_name = 'Electron Beam'
electronBeam = pkcollections.Dict()
for b in predefined_beams:
if b['name'] == full_beam_name:
electronBeam = b
electronBeam['beamSelector'] = full_beam_name
break
if not electronBeam:
electronBeam = pkcollections.Dict({
'beamSelector': full_beam_name,
'current': v.ebm_i,
'energy': _default_value('ebm_e', v, std_options, 3.0),
'energyDeviation': _default_value('ebm_de', v, std_options, 0.0),
'horizontalAlpha': _default_value('ebm_alphax', v, std_options, 0.0),
'horizontalBeta': _default_value('ebm_betax', v, std_options, 2.02),
'horizontalDispersion': _default_value('ebm_etax', v, std_options, 0.0),
'horizontalDispersionDerivative': _default_value('ebm_etaxp', v, std_options, 0.0),
'horizontalEmittance': _default_value('ebm_emx', v, std_options, 9e-10) * 1e9,
'horizontalPosition': v.ebm_x,
'isReadOnly': False,
'name': full_beam_name,
'rmsSpread': _default_value('ebm_ens', v, std_options, 0.00089),
'verticalAlpha': _default_value('ebm_alphay', v, std_options, 0.0),
'verticalBeta': _default_value('ebm_betay', v, std_options, 1.06),
'verticalDispersion': _default_value('ebm_etay', v, std_options, 0.0),
'verticalDispersionDerivative': _default_value('ebm_etayp', v, std_options, 0.0),
'verticalEmittance': _default_value('ebm_emy', v, std_options, 8e-12) * 1e9,
'verticalPosition': v.ebm_y,
})
undulator = pkcollections.Dict({
'horizontalAmplitude': _default_value('und_bx', v, std_options, 0.0),
'horizontalInitialPhase': _default_value('und_phx', v, std_options, 0.0),
'horizontalSymmetry': str(int(_default_value('und_sx', v, std_options, 1.0))),
'length': _default_value('und_len', v, std_options, 1.5),
'longitudinalPosition': _default_value('und_zc', v, std_options, 1.305),
'period': _default_value('und_per', v, std_options, 0.021) * 1e3,
'verticalAmplitude': _default_value('und_by', v, std_options, 0.88770981) if hasattr(v, 'und_by') else _default_value('und_b', v, std_options, 0.88770981),
'verticalInitialPhase': _default_value('und_phy', v, std_options, 0.0),
'verticalSymmetry': str(int(_default_value('und_sy', v, std_options, -1))),
})
gaussianBeam = pkcollections.Dict({
'energyPerPulse': None,
'polarization': 1,
'rmsPulseDuration': None,
'rmsSizeX': None,
'rmsSizeY': None,
'waistAngleX': None,
'waistAngleY': None,
'waistX': None,
'waistY': None,
'waistZ': None,
})
else:
source_type = 'g'
electronBeam = pkcollections.Dict()
default_ebeam_name = 'NSLS-II Low Beta Final'
for beam in predefined_beams:
if beam['name'] == default_ebeam_name:
electronBeam = beam
electronBeam['beamSelector'] = default_ebeam_name
break
if not electronBeam:
raise ValueError('Electron beam is not set during import')
undulator = pkcollections.Dict({
"horizontalAmplitude": "0",
"horizontalInitialPhase": 0,
"horizontalSymmetry": 1,
"length": 3,
"longitudinalPosition": 0,
"period": "20",
"undulatorParameter": 1.65776086,
"verticalAmplitude": "0.88770981",
"verticalInitialPhase": 0,
"verticalSymmetry": -1,
})
gaussianBeam = pkcollections.Dict({
'energyPerPulse': _default_value('gbm_pen', v, std_options),
'polarization': _default_value('gbm_pol', v, std_options),
'rmsPulseDuration': _default_value('gbm_st', v, std_options) * 1e12,
'rmsSizeX': _default_value('gbm_sx', v, std_options) * 1e6,
'rmsSizeY': _default_value('gbm_sy', v, std_options) * 1e6,
'waistAngleX': _default_value('gbm_xp', v, std_options),
'waistAngleY': _default_value('gbm_yp', v, std_options),
'waistX': _default_value('gbm_x', v, std_options),
'waistY': _default_value('gbm_y', v, std_options),
'waistZ': _default_value('gbm_z', v, std_options),
})
python_dict = pkcollections.Dict({
'models': pkcollections.Dict({
'beamline': beamline_elements,
'electronBeam': electronBeam,
'electronBeams': [],
'beamline3DReport': pkcollections.Dict({}),
'fluxReport': pkcollections.Dict({
'azimuthalPrecision': v.sm_pra,
'distanceFromSource': v.op_r,
'finalEnergy': v.sm_ef,
'fluxType': v.sm_type,
'horizontalApertureSize': v.sm_rx * 1e3,
'horizontalPosition': v.sm_x,
'initialEnergy': v.sm_ei,
'longitudinalPrecision': v.sm_prl,
'photonEnergyPointCount': v.sm_ne,
'polarization': v.sm_pol,
'verticalApertureSize': v.sm_ry * 1e3,
'verticalPosition': v.sm_y,
}),
'initialIntensityReport': initialIntensityReport,
'intensityReport': pkcollections.Dict({
'distanceFromSource': v.op_r,
'fieldUnits': 1,
'finalEnergy': v.ss_ef,
'horizontalPosition': v.ss_x,
'initialEnergy': v.ss_ei,
'photonEnergyPointCount': v.ss_ne,
'polarization': v.ss_pol,
'precision': v.ss_prec,
'verticalPosition': v.ss_y,
}),
'multiElectronAnimation': pkcollections.Dict({
'horizontalPosition': 0,
'horizontalRange': v.w_rx * 1e3,
'stokesParameter': '0',
'verticalPosition': 0,
'verticalRange': v.w_ry * 1e3,
}),
'multipole': pkcollections.Dict({
'distribution': 'n',
'field': 0,
'length': 0,
'order': 1,
}),
'postPropagation': op.arProp[-1],
'powerDensityReport': pkcollections.Dict({
'distanceFromSource': v.op_r,
'horizontalPointCount': v.pw_nx,
'horizontalPosition': v.pw_x,
'horizontalRange': v.pw_rx * 1e3,
'method': v.pw_meth,
'precision': v.pw_pr,
'verticalPointCount': v.pw_ny,
'verticalPosition': v.pw_y,
'verticalRange': v.pw_ry * 1e3,
}),
'propagation': _get_propagation(op),
'simulation': pkcollections.Dict({
'horizontalPointCount': v.w_nx,
'horizontalPosition': v.w_x,
'horizontalRange': v.w_rx * 1e3,
'isExample': 0,
'name': '',
'photonEnergy': v.w_e,
'sampleFactor': v.w_smpf,
'samplingMethod': 1,
'simulationId': '',
'sourceType': source_type,
'verticalPointCount': v.w_ny,
'verticalPosition': v.w_y,
'verticalRange': v.w_ry * 1e3,
}),
'sourceIntensityReport': pkcollections.Dict({
'characteristic': v.si_type, # 0,
'distanceFromSource': v.op_r,
'fieldUnits': 1,
'polarization': v.si_pol,
}),
'undulator': undulator,
'gaussianBeam': gaussianBeam,
}),
'simulationType': 'srw',
'version': '',
})
# Format the key name to be consistent with Sirepo:
for i in range(len(beamline_elements)):
if beamline_elements[i]['type'] == 'watch':
idx = beamline_elements[i]['id']
python_dict['models']['watchpointReport{}'.format(idx)] = initialIntensityReport
return python_dict
def _patch_mirror_profile(code, mirror_file='mirror_1d.dat'):
"""Patch for the mirror profile for the exported .py file from Sirepo"""
import sirepo.template.srw
# old format mirror names
var_names = ['Cryst', 'ElMirror', 'Mirror', 'SphMirror', 'TorMirror']
code_list = code.split('\n')
for var_name in var_names:
if var_name in ['Mirror']:
final_mirror_file = '"{}"'.format(_SIM_DATA.lib_file_abspath(mirror_file))
else:
final_mirror_file = None
var_name = 'ifn' + var_name
for i in range(len(code_list)):
if re.search(r'^(\s*)' + var_name + r'(\d*)(\s*)=(\s*)(.*\.dat\w*)(\s*)', code_list[i]):
full_var_name = code_list[i].strip().split('=')[0].strip()
code_list[i] = code_list[i].replace(
full_var_name,
'{} = {} # '.format(full_var_name, final_mirror_file)
)
code = '\n'.join(code_list)
return code
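# Illustrative before/after of the patch (hypothetical exported line, not from
# the original source): a line such as
#     ifnMirror1 = 'mirror.dat'
# becomes
#     ifnMirror1 = "/abs/path/to/mirror_1d.dat" #  = 'mirror.dat'
# i.e. the old right-hand side is left behind a comment marker.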
def _update_crystals(data, v):
"""Update rotation angle from the parameters value.
Args:
data: list of beamline elements from get_beamline() function.
v: object containing all variables.
Returns:
data: updated list.
"""
for i in range(len(data)):
if data[i]['type'] == 'crystal':
try: # get crystal #
crystal_id = int(data[i]['title'].replace('Crystal', ''))
except Exception:
crystal_id = 1
try: # update rotation angle
data[i]['rotationAngle'] = getattr(v, 'op_DCM_ac{}'.format(crystal_id))
except Exception:
pass
if not data[i]['energy']:
try: # update energy if an old srwlib.py is used
data[i]['energy'] = v.op_DCM_e
except Exception:
data[i]['energy'] = v.w_e
return data
|
radiasoft/sirepo
|
sirepo/template/srw_importer.py
|
Python
|
apache-2.0
| 29,980
|
[
"CRYSTAL"
] |
39041d627726804db354b1336037a62ec9741ecf3de0a7027311dbbb79605c3b
|
from base import BaseBot
from scraping import fleet, general, hangar
from copy import copy
class TransporterBot(BaseBot):
"""Logging functions for the bot"""
def __init__(self, browser, config, planets):
self.fleet_client = fleet.Fleet(browser, config, planets)
self.hangar_client = hangar.Hangar(browser, config)
self.general_client = general.General(browser, config)
super(TransporterBot, self).__init__(browser, config, planets)
def transport_resources_to_planet(self, planet=None):
"""transport resources to the planet,
if there is not planet especified the function will chose the default origin planet
"""
planets = self.planets
if self.planet is not None:
destination_planet = self.planet
elif planet is not None:
destination_planet = planet
else:
self.logger.info("there is no specified target planet, using default origin planet instead")
destination_planet = self.default_origin_planet
self.logger.info("Transporting resources to planet: %s" % destination_planet.name)
for planet in [planet for planet in planets if planet != destination_planet]:
planet.ships = self.hangar_client.get_ships(planet)
resources = self.general_client.get_resources(planet)
restricted_resources = self.get_restrict_resources_under_user_preferences(resources)
self.fleet_client.transport_resources(planet, destination_planet, restricted_resources)
def get_restrict_resources_under_user_preferences(self, resources):
restricted_resources = copy(resources)
restricted_resources.energy = 0
if not self.config.transport_metal:
restricted_resources.metal = 0
if not self.config.transport_crystal:
restricted_resources.crystal = 0
if not self.config.transport_deuterium:
restricted_resources.deuterium = 0
return restricted_resources
|
yosh778/OG-Bot
|
ogbot/core/transporter.py
|
Python
|
mit
| 2,029
|
[
"CRYSTAL"
] |
18674dd71283860adbef2b9f44bb735eaa4c2b035dfbb4b1944fbcbd77732e03
|
'''
Python module dependencies:
biopython==1.63
fastcluster==1.1.13
numpy==1.7.1
python-Levenshtein==0.11.2
scipy==0.12.0
Under Ubuntu, scipy, numpy and biopython can be installed as:
sudo apt-get install python-biopython python-numpy python-scipy
fastcluster and python-Levenshtein can be installed using pip:
pip install fastcluster python-Levenshtein
'''
from __future__ import print_function
import time
import math
import json
import numpy as np
from multiprocessing import Pool, cpu_count
import fastcluster as fc
from Bio import pairwise2
from Levenshtein import distance
from scipy.cluster.hierarchy import fcluster
import subprocess
import os
import resource
default_dtype = 'f4'
distance_cutoff = 0.32
class Seq(object):
"""Contains genetic characteristics for a single sequence.
Input:
data = a MongoDB result (dict-like) containing the following fields:
[seq_id, v_gene, j_gene, <junc_query>, var_muts_nt]
where <junc_query> is the sequence of the nucleotide or AA junction.
junc_query = either 'junc_aa' or 'junc_nt' for nucleotide or AA junctions, respectively.
"""
def __init__(self, data, junc_query):
self.id = data['seq_id']
self.v_fam = data['v_gene']['fam']
self.v_gene = data['v_gene']['gene']
self.v_all = data['v_gene']['all']
self.j_gene = data['j_gene']['gene']
self.j_all = data['j_gene']['all']
self.junc = data[junc_query]
self.junc_len = len(self.junc)
self.muts = []
if 'var_muts_nt' in data.keys():
self.muts = data['var_muts_nt']
def v_gene_string(self):
return 'v{0}-{1}'.format(self.v_fam, self.v_gene)
def v_fam_string(self):
return 'v{0}'.format(self.v_fam)
def get_LD(i, j):
'''Calculate sequence distance between a pair of Seq objects'''
# pairwise2 is used to force 'gapless' distance when sequence pair is of the same length
if i.junc_len == j.junc_len:
identity = pairwise2.align.globalms(i.junc, j.junc, 1, 0, -50, -50, score_only=True, one_alignment_only=True)
return i.junc_len - identity
# Levenshtein distance is used for sequence pairs of different lengths
else:
return distance(i.junc, j.junc)
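# Illustrative behavior (hypothetical junctions): for equal lengths the heavy
# gap penalties (-50) force a gapless alignment, so 'CARDY' vs 'CARDW' gives
# identity=4 and LD = 5 - 4 = 1; for unequal lengths plain Levenshtein is
# used, e.g. distance('CARDY', 'CARD') = 1.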
def vCompare(i, j):
'''Calculate penalty for mismatches in Variable segment.'''
if i.v_gene != j.v_gene:
return 8
if i.v_all != j.v_all:
return 1
return 0
def jCompare(i, j):
'''Calculate penalty for mismatches in Joining segment.'''
if i.j_gene != j.j_gene:
return 8
if i.j_all != j.j_all:
return 1
return 0
def sharedMuts(i, j):
'''Calculate bonus for shared mutations.'''
if i.id == j.id:
return 0.0
bonus = 0.0
for mut in i.muts:
if mut == '':
continue
if mut in j.muts:
bonus += 0.35
return bonus
def get_score(i, j=None):
if j is None:
i, j = i
if i.id == j.id:
return 0.0
LD = get_LD(i, j)
vPenalty = vCompare(i, j)
jPenalty = jCompare(i, j)
lenPenalty = math.fabs(i.junc_len - j.junc_len) * 2
editLength = min(i.junc_len, j.junc_len)
mutBonus = sharedMuts(i, j)
if mutBonus > (LD + vPenalty + jPenalty):
mutBonus = (LD + vPenalty + jPenalty - 0.001) # distance values can't be negative
return (LD + vPenalty + jPenalty + lenPenalty - mutBonus) / editLength
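# Worked example with hypothetical values (not from the original data):
# LD=2, vPenalty=8 (different V genes), jPenalty=0, junction lengths 14 vs 13
# -> lenPenalty=2, editLength=13, mutBonus=0.35, so
#     score = (2 + 8 + 0 + 2 - 0.35) / 13 ~= 0.90
# which is far above distance_cutoff=0.32, i.e. the pair falls in different lineages.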
def make_iter(seqs, mode=1):
for i, seq_i in enumerate(seqs):
if mode == 1:
for seq_j in seqs[i + 1:]:
yield (seq_i, seq_j)
else:
yield (seq_i, seqs[i + 1:])
def get_scores_one_row(args):
(seq_i, row_j) = args
return np.array([get_score(seq_i, seq_j) for seq_j in row_j], dtype=default_dtype)
def build_condensed_matrix(seqs, mode=2):
result = np.array([], dtype=default_dtype)
p = Pool(processes=cpu_count())
if mode == 1:
n = len(seqs)
#chunksize = 500000
chunksize = int(n * (n - 1) / 2 / cpu_count() / 2)
result_one = p.imap(get_score, make_iter(seqs, mode=1), chunksize=chunksize)
result = np.array(list(result_one), dtype=default_dtype)
else:
result_one_row = p.imap(get_scores_one_row, make_iter(seqs, mode=2), chunksize=100)
result = np.concatenate(list(result_one_row))
#p.close()
#p.join()
return result
def build_cluster_dict(flatCluster):
clusters = {}
for i, c in enumerate(flatCluster):
if c in clusters:
clusters[c].append(i)
else:
clusters[c] = [i]
return clusters
def make_clusters(con_distMatrix):
linkageMatrix = fc.linkage(con_distMatrix, method='average', preserve_input=False)
del con_distMatrix
flatCluster = fcluster(linkageMatrix, distance_cutoff, criterion='distance')
del linkageMatrix
return flatCluster
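# Minimal sketch with hypothetical distances: for 4 sequences the condensed
# matrix holds the 6 pairwise scores in upper-triangle order, e.g.
#     con = np.array([0.1, 0.5, 0.6, 0.4, 0.7, 0.2], dtype=default_dtype)
#     make_clusters(con)  # -> labels grouping {seq0, seq1} and {seq2, seq3}
# since average linkage merges (0,1) at 0.1 and (2,3) at 0.2, while the two
# groups only join at 0.55, above distance_cutoff=0.32.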
def write_output(outfile, clusters, seqs, vh='v0'):
with open(outfile, 'w') as out_f:
for c in clusters.keys():
if len(clusters[c]) < 2:
continue
rString = "#lineage_{0}_{1}\n".format(vh, str(c))
for seq_idx in clusters[c]:
seq = seqs[seq_idx]
rString += '>{0}\n{1}\n'.format(seq.id, seq.junc)
rString += '\n'
out_f.write(rString)
def get_memory_usage():
rss = subprocess.check_output('ps -p {} u'.format(os.getpid()), shell=True).decode('utf-8').split('\n')[1].split()[5]
max_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print('current_rss: {}\tmax_rss: {}'.format(rss, max_rss))
def analyze(infile, outfile=None, n=None, output_format='cluster_only', memory_usage=False):
if memory_usage:
        get_memory_usage()
t00 = time.time()
print("Loading input sequences...", end='')
with open(infile) as in_f:
seqs = json.load(in_f)
if n:
seqs = seqs[:n]
seqs = [Seq(s, 'junc_aa') for s in seqs]
print("done. [{}, {:.2f}s]".format(len(seqs), time.time() - t00))
if memory_usage:
        get_memory_usage()
t0 = time.time()
print("Calculating condensed distance matrix...", end='')
con_distMatrix = build_condensed_matrix(seqs, mode=2) # ####
print("done. [{}, {:.2f}s]".format(con_distMatrix.shape, time.time() - t0))
print("\tmin: {}, max: {}".format(con_distMatrix.min(), con_distMatrix.max()))
if memory_usage:
        get_memory_usage()
t0 = time.time()
print("Calculating clusters...", end='')
clusters = make_clusters(con_distMatrix)
print("done. [{}, {:.2f}s]".format(clusters.max(), time.time() - t0))
if memory_usage:
        get_memory_usage()
t0 = time.time()
print ("Outputting clusters...", end='')
if output_format == 'seqs':
clusters = build_cluster_dict(clusters)
write_output(outfile, clusters, seqs)
else:
np.savetxt(outfile, clusters, fmt='%d')
print("done. {:.2f}s".format(time.time() - t0))
print('=' * 20)
print("Finished! Total time= {:.2f}s".format(time.time() - t00))
if memory_usage:
        get_memory_usage()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Clonify script.')
parser.add_argument('infile', action="store", help='input sequence file')
parser.add_argument('outfile', action="store", help='output file')
parser.add_argument('-n', action="store", dest="n", type=int,
help='maximum number of sequences to process from input file')
parser.add_argument('-f', action='store', dest='output_format', default='cluster_only',
help='output format: cluster_only | seqs.')
parser.add_argument('-m', action='store_true', dest='memory_usage',
                        help='print out memory usage')
args = parser.parse_args()
analyze(args.infile, args.outfile, n=args.n,
output_format=args.output_format,
memory_usage=args.memory_usage)
|
newgene/clonify
|
clonify_contest.py
|
Python
|
mit
| 8,044
|
[
"Biopython"
] |
5cb10bd9ec1630a55f87296a0f734fc68a4346756990c33be9264ff590ba7ea9
|
#
# @file TestSpeciesType_newSetters.py
# @brief SpeciesType unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpeciesType_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpeciesType_newSetters(unittest.TestCase):
global ST
ST = None
def setUp(self):
self.ST = libsbml.SpeciesType(2,2)
if (self.ST == None):
pass
pass
def tearDown(self):
_dummyList = [ self.ST ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesType_setId2(self):
i = self.ST.setId( "1cell")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.ST.isSetId() )
pass
def test_SpeciesType_setId3(self):
i = self.ST.setId( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.ST.isSetId() )
self.assert_(( "cell" == self.ST.getId() ))
pass
def test_SpeciesType_setId4(self):
i = self.ST.setId( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.ST.isSetId() )
self.assert_(( "cell" == self.ST.getId() ))
i = self.ST.setId("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.ST.isSetId() )
pass
def test_SpeciesType_setName1(self):
i = self.ST.setName( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.ST.isSetName() )
i = self.ST.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.ST.isSetName() )
pass
def test_SpeciesType_setName2(self):
i = self.ST.setName( "1cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.ST.isSetName() )
i = self.ST.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.ST.isSetName() )
pass
def test_SpeciesType_setName3(self):
i = self.ST.setName( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.ST.isSetName() )
i = self.ST.setName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.ST.isSetName() )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSpeciesType_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestSpeciesType_newSetters.py
|
Python
|
bsd-3-clause
| 3,818
|
[
"VisIt"
] |
3239712ef82b16e039f4ca8b55fe1ddb0764fcb0c87469f7d3900a4434350e5c
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 14:38:46 2011
Plot classification accuracy vs. percentage of salient features
@author: -
"""
# Plots recognition accuracy for Harris corners (several kappa values) and det(Hessian) features
import os;
import optparse;
import time;
import sys;
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import glob
main_dir="/Users/isa/Experiments/BOF/helicopter_providence";
nobjects = 120;
thresh = [1, 5, 10, 20, 50, 75, 90];
trials = [0,1,2,3,4];
num_means=20;
k_label = ['Harris $(\kappa=0.01)$' , 'Harris $(\kappa=0.0075)$', 'Harris $(\kappa=0.005)$'];
colors = ['magenta','blue','green'];
fig=plt.figure()
ax = fig.add_subplot(111)
plt.autoscale(tight=False);
harris_marker=['--o', '--*','--^' ];
for k in range(1,4):
param_dir= main_dir + "/corners/k_" + str(k);
#param_dir= main_dir + "/beaudet_corners";
accuracy = np.zeros((len(trials),len(thresh)));
row=0;
for trial in trials:
col=0;
for t in thresh:
bof_dir=param_dir +"/bof_cross_validation/trial_"+str(trial) + "/thresh_" + str(t);
classification_dir=bof_dir +"/classification_" + str(num_means)
accuracy_file = classification_dir + "/accuracy.txt"
f = open(accuracy_file, 'r');
lines=[];
lines = f.readlines();
#print lines
#print str(int(lines[0]))
accuracy[row,col]=(float(lines[0])/ float(lines[1]));
col=col+1;
row=row+1;
yerr_up = accuracy.max(0)-accuracy.mean(0);
yerr_down = accuracy.mean(0)-accuracy.min(0);
ax.errorbar(thresh, accuracy.mean(0),yerr=[yerr_down, yerr_up], fmt=harris_marker[k-1], label=k_label[k-1], color=colors[k-1], capsize=12, markersize=8)
plt.hold(True);
#Plot det(Hessian)
param_dir= main_dir + "/beaudet_corners";
accuracy = np.zeros((len(trials),len(thresh)));
row=0;
for trial in trials:
col=0;
for t in thresh:
bof_dir=param_dir +"/bof_cross_validation/trial_"+str(trial) + "/thresh_" + str(t);
classification_dir=bof_dir +"/classification_" + str(num_means)
accuracy_file = classification_dir + "/accuracy.txt"
f = open(accuracy_file, 'r');
lines=[];
lines = f.readlines();
#print lines
#print str(int(lines[0]))
accuracy[row,col]=(float(lines[0])/ float(lines[1]));
col=col+1;
row=row+1;
yerr_up = accuracy.max(0)-accuracy.mean(0);
yerr_down = accuracy.mean(0)-accuracy.min(0);
ax.errorbar(thresh, accuracy.mean(0),yerr=[yerr_down, yerr_up], fmt='--v', label='DoH', color='r', capsize=12, markersize=8)
ax.set_xlabel('Percentage of salient features ',fontsize= 18);
ax.set_ylabel('Accuracy',fontsize= 18);
x=np.arange(0,100,10);#[1,5,10,20,50,75,90];
ax.set_xticklabels(x, fontsize= 14);
ax.set_xticks(x)
ax.set_xlim((0, 100));
ax.set_ylim((0,1.02));
ylabels = np.arange(0,1.1,0.2);
ax.set_yticklabels(ylabels, fontsize= 14)
plt.legend(loc='lower center', frameon=False);
plt.show();
# plt.plot(x, accuracy, label=k_label[k-1]);
# plt.xlabel('Percentage of samples used for recognition',fontsize= 'large');
#
# plt.ylabel('Accuracy',fontsize= 'large');
# plt.hold(True);
#
#plt.legend(loc='lower right');
#plt.show();
#plt.hold(False);
|
mirestrepo/voxels-at-lems
|
harris_experiments/plot_accuracy_all_trials.py
|
Python
|
bsd-2-clause
| 3,238
|
[
"Gaussian"
] |
bacdea6038a8ad9505054bbd568536d2b511952b578ad87621b9840c4f091e62
|
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
from datetime import datetime, time, timedelta
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
from ShiftTimes import ShiftCorrection, ShiftDts
import os, argparse
import h5py
from h5py import File as h5
from numpy import (
mean, median, nanmean, nanmedian, std, nan,
isnan, min, max, zeros, ones, size, loadtxt
)
from os.path import isfile, isdir
if 'DISPLAY' in os.environ: # to avoid crash when running remotely
from pylab import figure, savefig, close, find, pause
import matplotlib.patches as patches
import matplotlib.transforms as transforms
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
#from z_expansion_gulisano import z as z_exp
_ERROR_ = ccl.Rn+' ### ERROR ###: '+ccl.W
def flags2nan(VAR, FLAG):
cond = VAR < FLAG
VAR = np.array(VAR)
VAR[~cond] = np.nan
return VAR
def date_to_utc(fecha):
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
sec_utc = (fecha - utc).total_seconds()
return sec_utc
def selecc_data(data, tshk):
time = data[0] #[s] utc sec
rate = data[1]
    day = 86400.  # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tshk_utc = (tshk - utc).total_seconds()
    ti = tshk_utc - 10.*day  # [sec] utc
tf = tshk_utc + 30.*day
cond = (time > ti) & (time < tf)
time = (time[cond] - tshk_utc) / day # [days] since shock
rate = rate[cond]
return (time, rate)
def selecc_window(data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
    day = 86400.  # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
    ti = tini_utc  # [sec] utc
tf = tend_utc
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def enoughdata(var, fgap):
n = len(var)
ngood = len(find(~isnan(var)))
    fdata = 1.*ngood/n  # fraction of the data without gaps
if fdata>=(1.-fgap):
return True
else:
return False
def averages_and_std(n_icmes, t_shck, ti_icme, dTday, nbin, t_utc, VAR, fgap):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (ti_icme[i] - t_shck[i]).total_seconds()/day # [day]
if dT>dTday:
dt = dT/nbin
t, var = selecc_window(
[t_utc, VAR],
t_shck[i], ti_icme[i]
)
            if enoughdata(var, fgap):  # require that more than 80% of the data are NOT gaps
adap += [adaptar(nbin, dt, t, var)]
nok +=1
else:
continue
else:
print " i:%d ---> Este evento es muy chico!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond))  # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond])  # average over the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond])  # std of the same data set
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata]
def adaptar(n, dt, t, r):
    #n = int(5./dt)  # number of points over the whole plotting interval
tt = zeros(n)
rr = zeros(n)
for i in range(n):
tmin = i*dt
tmax = (i+1.)*dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])
rr[i] = mean(r[cond])
return [tt/(n*dt), rr]
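# NOTE: the 6-argument 'adaptar' defined right below shadows this 4-argument
# version, so the adaptar(nbin, dt, t, var) call in averages_and_std() above
# would fail at runtime; both definitions are kept as in the original source.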
def adaptar(nwndw, dT, n, dt, t, r):
    #n = int(5./dt)  # number of points over the whole plotting interval
tt = zeros(n)
rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1])  # number of bins in the sheath
for i in range(n):
tmin = (i-nwndw[0]*_nbin_)*dt
tmax = tmin + dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1)
rr[i] = mean(r[cond])
    return [tt/dT, rr]  # time normalized by the sheath duration
#@profile
def adaptar_ii(nwndw, dT, n, dt, t, r, fgap):
tt = zeros(n)
rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1])  # number of bins in the sheath/mc
    cc = (t>0.) & (t<dT)  # sheath/mc interval
    #print " r[cc]: ", r[cc]
    if len(r[cc])==0:  # no data in this window
rr = nan*ones(n)
enough = False
else:
        enough = enoughdata(r[cc], fgap)  # [bool] True if more than 80% of the data is good.
        if not(enough):
            rr = nan*ones(n)  # without enough data, this event does not contribute
for i in range(n):
tmin = (i-nwndw[0]*_nbin_)*dt
tmax = tmin + dt
cond = (t>=tmin) & (t<=tmax)
#tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1) # bug
        tt[i] = tmin + .5*dt  # bug fixed
if enough:
            #cc = ~isnan(r[cond])  # don't forget to filter the gaps
#rr[i] = mean(r[cond][cc])
rr[i] = nanmean(r[cond])
    return enough, [tt/dT, rr]  # time normalized by the duration of the sheath/mc/etc
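# Worked example with hypothetical numbers: for nwndw=[1, 1], n=6 and a
# structure of duration dT=2*dt, _nbin_ = 6/3 = 2 and bin i starts at
# tmin = (i-2)*dt, so the normalized bin centers tt/dT come out as
#     [-0.75, -0.25, 0.25, 0.75, 1.25, 1.75]
# i.e. the structure itself occupies tnorm in (0, 1).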
#@profile
def selecc_window_ii(nwndw, data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
    day = 86400.  # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
dt = tend_utc - tini_utc
    ti = tini_utc - nwndw[0]*dt  # [sec] utc
tf = tend_utc + nwndw[1]*dt
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def averages_and_std_ii(nwndw,
SELECC, #MCsig, MCwant,
n_icmes, tini, tend, dTday, nbin, t_utc, VAR):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (tend[i] - tini[i]).total_seconds()/day # [day]
if ((dT>dTday) & SELECC[i]):# (MCsig[i]>=MCwant)):
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = selecc_window_ii(
                nwndw, # number of widths backward and forward
[t_utc, VAR],
tini[i], tend[i]
)
            adap += [adaptar(nwndw, dT, nbin, dt, t, var)]  # rebin using 'dt' as the new bin width
nok +=1
else:
print " i:%d ---> Filtramos este evento!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_medi = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond))  # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond])  # average over the non-flagged values
        VAR_medi[i] = median(VAR_adap.T[i,cond])  # median of the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond])  # std of the same data set
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata]
def mvs_for_each_event(VAR_adap, nbin, nwndw, Enough, verbose=False):
nok = size(VAR_adap, axis=0)
    mvs = zeros(nok)  # mean values for each event
    binsPerTimeUnit = nbin/(1+nwndw[0]+nwndw[1])  # number of bins per time unit
    start = nwndw[0]*binsPerTimeUnit  # the structure (MC or sheath) starts at this bin
for i in range(nok):
aux = VAR_adap[i, start:start+binsPerTimeUnit] # (*)
cc = ~isnan(aux) # pick good-data only
#if len(find(cc))>1:
        if Enough[i]:  # only print those with *enough data*
if verbose:
print ccl.G + "id %d/%d: %r"%(i+1, nok, aux[cc]) + ccl.W
mvs[i] = mean(aux[cc])
else:
mvs[i] = nan
    #(*): this is the time series (of this variable) for event "i"
pause(1)
return mvs
def diff_dates(tend, tini):
n = len(tend)
diffs = np.nan*np.ones(n)
for i in range(n):
        ok = type(tend[i]) == type(tini[i]) == datetime  # both must be dates!
if ok:
diffs[i] = (tend[i] - tini[i]).total_seconds()
else:
diffs[i] = np.nan
return diffs #[sec]
def write_variable(fout, varname, dims, var, datatype, comments):
dummy = fout.createVariable(varname, datatype, dims)
dummy[:] = var
dummy.units = comments
def calc_beta(Temp, Pcc, B):
    # We take the OMNI definition, from:
# http://omniweb.gsfc.nasa.gov/ftpbrowser/magnetopause/Reference.html
# http://pamela.roma2.infn.it/index.php
# Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT)
#
beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2
return beta
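# Worked example with hypothetical solar-wind values: Temp=1e5 K, Pcc=5 /cc,
# B=5 nT gives
#     beta = ((4.16e-5 * 1e5) + 5.34) * 5 / 5**2 = (4.16 + 5.34) * 0.2 = 1.9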
def thetacond(ThetaThres, ThetaSh):
"""
Set a lower threshold for shock orientation, using Wang's
catalog of shocks.
    NOTE: Near 180° means very close to the nose!
"""
if ThetaThres<=0.:
print ccl.Rn + ' ----> BAD WANG FILTER!!: ThetaThres<=0.'
        print ' ----> Exiting...' + ccl.Rn
raise SystemExit
#return ones(len(ThetaSh), dtype=bool)
else:
return (ThetaSh > ThetaThres)
def wangflag(ThetaThres):
if ThetaThres<0:
return 'NaN'
else:
return str(ThetaThres)
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
SUBTITLE, YLIMS, YLAB, fname_fig):
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
ax.plot(tnorm, avrVAR, 'o-', color='black', markersize=5, label='mean')
ax.plot(tnorm, medVAR, 'o-', color='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')
inf = avrVAR + stdVAR/np.sqrt(nVAR)
sup = avrVAR - stdVAR/np.sqrt(nVAR)
ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.legend(loc='upper right')
ax.grid()
ax.set_ylim(YLIMS)
TITLE = SUBTITLE
ax.set_title(TITLE)
ax.set_xlabel('time normalized to MC passage time [1]', fontsize=14)
ax.set_ylabel(YLAB, fontsize=20)
savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
close()
def makefig_ii(mc, sh, YLIMS, YLAB, **kws):
"""
- ftext{bool}:
if False, we put the text in the title. Otherwise, we put
the text inside the figure, using `TEXT_LOC`{dict} as positions
- TEXT_LOC{dict}:
coordinates for the text inside the figure. The `TEXT_LOC['sh']`{2-tuple} are
the positions for the left part, and `TEXT_LOC['mc']`{2-tuple} for the right
part.
"""
#--- kws
ftext = kws.get('ftext', False)
TEXT = kws.get('TEXT', None)
TEXT_LOC = kws.get('TEXT_LOC', None)
fname_fig = kws.get('fname_fig', None)
#-------------------------------------
    fmc,fsh = 3.0, 1.0  # temporal scalings
#--- if figure is not given, create one
if 'fig' in kws:
fig, ax = kws['fig'], kws['ax']
else:
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
# catch the name of the observable
if 'varname' in kws:
varname = kws['varname']
else:
varname = fname_fig[:-4].split('_')[-1]
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
if ftext:
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=5)
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none')
    # MC shaded band
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
    # sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=5)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none')
    # sheath shaded band
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
ax.tick_params(labelsize=17)
ax.grid()
ax.set_ylim(YLIMS)
if ftext:
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=22)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=22)
else:
if TEXT is not None:
ax.set_title(
'left: '+TEXT['sh']+'\n'
'right: '+TEXT['mc']
)
else:
pass # no text anywhere
ax.set_ylabel(YLAB, fontsize=27)
# if `varname` has any of these strings, plot in log-scale.
if any([(nm in varname) for nm in \
('beta','Temp', 'rmsB', 'rmsBoB', 'ratio')]):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
ax.legend(loc='best', fontsize=20)
if 'fig' not in kws: # if figure not given, save to disk
ax.set_xlim(-2.0, 7.0)
ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=25)
savefig(fname_fig, format='png', dpi=100, bbox_inches='tight')
close()
return None
else:
# return changes of passed figure
return fig, ax
#--- check that the file does not repeat elements of the 1st column
def check_redundancy(fname, name):
f = open(fname, 'r')
dummy = {}
for line in f:
ll = line.split(' ')
varname = ll[0]
dummy[varname] = 0
dummy_names = dummy.keys()
dummy_set = set(dummy_names)
redundancy = len(dummy_set)<len(dummy_names)
overwriting = name in dummy_set
if redundancy or overwriting:
return True
else:
return False
class general:
def __init__(self):
self.name = 'name'
class dummy1:
def __init__(self,):
pass
class dummy2 (object):
"""
can be used:
>>> dd = dummy2()
>>> dd['name'] = [3,4,5]
>>> dd['name2'].time = [0,1,2,3,4]
"""
def __init__(self):
self.this = {}
def __getitem__(self, idx):
if not idx in self.this.keys():
self.this[idx] = dummy1()
return self.this[idx]
def set(self, name, attname, value):
if not name in self.this.keys():
self.this[name] = dummy1()
setattr(self.this[name], attname, value)
def keys(self,):
return self.this.keys()
class boundaries:
def __init__(self):
print self.__dict__
print dict(self)
def ff(self):
self.fg = 0.2
def nans(sh):
return np.nan*np.ones(sh)
def grab_time_domain(adap, check=False):
"""
Search for a valid time domain for
this `varname` and return.
If `check`==True, it checks that all time domains
are the same (for all `varname`s) unless a difference
of 10 times the numerical epsilon.
"""
na = len(adap)
# grab all posible time domains
found = False
for i in range(na):
for name in adap[i].keys():
if not(found):
tarr = adap[i][name][0]
if tarr is not None:
found = True
if found:
# we found a valid time domain (`tarr`)
if check:
# assume time array is 'np.float32'
eps32 = np.finfo(np.float32).eps
for i in range(na):
for name in adap[i].keys():
tarr_ = adap[i][name][0]
if tarr_ is not None:
# they differ at most in its
# numerical epsilon
ok = (tarr_-tarr<=eps32)
assert ok.prod(),\
" we have more than 1 valid time domain!!:\n%r\n\n%r"%(
tarr_, tarr)
return tarr
#--- didn't find any valid time domain
try:
        # hang in debug mode
import pdb; pdb.set_trace()
except ImportError:
# ok, get out!
raise SystemExit(
            'shut! none are valid time domains!'
)
class events_mgr(object):
def __init__(self, gral, FILTER, CUTS, bd, nBin, fgap, tb, z_exp, structure='mc', fparam='mc_V', verbose=True):
"""
        structure: can be 'sh.mc', 'sh.i', 'mc', 'i', referring to sheath-of-mc,
sheath-of-icme, mc, and icme, respectively. This is to
use the proper mean values calculated in each structure.
"""
self.fparam = fparam
self.structure = structure
self.data_name = gral.data_name
self.FILTER = FILTER
self.CUTS = CUTS
self.bd = bd
self.nBin = nBin
self.fgap = fgap
self.tb = tb
self.z_exp = z_exp
self.dir_plots = gral.dirs['dir_plots']
self.dir_ascii = gral.dirs['dir_ascii']
self.gral = gral
self._dirs_ = gral.dirs
self.verbose = verbose
#self.f_sc = netcdf_file(gral.fnames[gral.data_name], 'r')
self.f_events = netcdf_file(gral.fnames['table_richardson'], 'r')
print " -------> archivos input leidos!"
#--- put False to all possible data-flags (all CR detector-names
# must be included in 'self.CR_observs')
self.names_ok = ('Auger_BandMuons', 'Auger_BandScals', 'Auger_scals', \
'McMurdo', 'ACE', 'ACE_o7o6', 'ACE1sec')
for name in self.names_ok:
read_flag = 'read_'+name
setattr(self, read_flag, False) # True: if files are already read
#--- names of CR observatories
self.CR_observs = ( #must **include** the 'load_data_..()' methods
'Auger_scals', 'Auger_BandMuons', 'Auger_BandScals',\
'McMurdo')
#--- just a check for load_data_.. methods
for att_name in dir(events_mgr): # iterate on all methods
if att_name.startswith('load_data_'):
att_suffix = att_name.replace('load_data_', '')
assert att_suffix in self.names_ok,\
" [-] ERROR: one of the methods '%s' is NOT taken into account in 'self.CR_observs' (%s) " % (att_name, att_suffix)
        self.data_name_ = str(self.data_name)  # name of the initial input data (*1)
self.IDs_locked = False # (*2)
"""
(*1): si despues cambia 'self.data_name', me voy a dar
cuenta en la "linea" FLAG_001.
(*2): lock in lock_IDs().
True: if the id's of the events have been
fixed/locked, so that later analysis is
resctricted only with theses locked id's.
"""
#++++++++++ CORRECTION OF BORDERS ++++++++++
        # IMPORTANT:
        # Only valid for the "63 events" (MCflag='2', and visible in ACE)
        # NOTE: the shock jumps are more pronounced with True.
# TODO: make a copy/deepcopy of `tb` and `bd`, so that we don't
# bother the rest of data_names (i.e. Auger_scals, Auger_BandMuons,
# etc.)
if FILTER['CorrShift']:
ShiftCorrection(ShiftDts, tb.tshck)
ShiftCorrection(ShiftDts, tb.tini_icme)
ShiftCorrection(ShiftDts, tb.tend_icme)
ShiftCorrection(ShiftDts, tb.tini_mc)
ShiftCorrection(ShiftDts, tb.tend_mc)
ShiftCorrection(ShiftDts, bd.tini)
ShiftCorrection(ShiftDts, bd.tend)
def run_all(self, _data_handler):
        #----- event selection
        self.filter_events()
        print "\n ---> event filtering (n:%d): OK\n" % (self.n_SELECC)
        #----- load the data and the "omni" shift-times
        self.load_files_and_timeshift_ii(_data_handler)
        #----- rebinning and averages
        self.rebine()
        self.rebine_final()
        #----- make the plots
        self.make_plots()
        #----- "stuff" files
        self.build_params_file()
#@profile
def rebine(self, collect_only=False):
"""
        rebinning of each event
"""
nvars = self.nvars #len(VARS)
n_icmes = self.tb.n_icmes
bd = self.bd
VARS = self.VARS
nbin = self.nBin['total']
nwndw = [self.nBin['before'], self.nBin['after']]
day = 86400.
        #---- we want a list of the event ids that will be included in each average :-)
IDs = {}
Enough, nEnough = {}, {}
        self.__ADAP__ = ADAP = []  # collection of several 'adap' (one per variable)
for varname in VARS.keys():
IDs[varname] = []
Enough[varname] = []
nEnough[varname] = 0 # counter
        # loop over the events:
        nok, nbad = 0, 0
        nnn = 0  # number of events that pass the a-priori filter
self.out = {}
if collect_only:
self.out['events_data'] = {} # bag to save data from events
ok = np.zeros(n_icmes,dtype=np.bool) # all `False` by default
for i in range(n_icmes):
            try:  # not all elements of 'tend' are dates (some events have no defined date)
# this 'i'-event must be contained in our data-base
ok[i] = date_to_utc(bd.tini[i]) >= self.t_utc[0] #True
ok[i] &= date_to_utc(bd.tend[i]) <= self.t_utc[-1]
if self.IDs_locked:
ok[i] &= i in self.restricted_IDs
except: # e.g. if `bd.{tini,tend}[i]` is NaN
ok[i] = False
for i in range(n_icmes):
            #np.set_printoptions(4)  # number of digits to print when using numpy.arrays
            if not (ok[i] & self.SELECC[i]):  #---FILTER--- (*1)
print ccl.Rn, " id:%d ---> ok, SELECC: "%i, ok[i], self.SELECC[i], ccl.W
nbad +=1
continue
dT = (bd.tend[i] - bd.tini[i]).total_seconds()/day # [day]
            ADAP += [ {} ]  # append a dictionary to the list
nnn += 1
print ccl.Gn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
print self.tb.tshck[i]
nok +=1
if collect_only:
# evdata is just a pointer
evdata = self.out['events_data']['id_%03d'%i] = dummy2() #{}
            # loop over the variables:
for varname in VARS.keys():
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = self.grab_window(
                    nwndw=nwndw, # plotting range
data=[self.t_utc, VARS[varname]['value']],
tini=bd.tini[i],
tend=bd.tend[i],
vname=varname, # for ACE 1sec
)
if collect_only:
evdata.set(varname, 'time', t)
evdata.set(varname, 'data', var)
#--- read average CR rates before shock/disturbance
if self.data_name in self.CR_observs: # is it CR data?
rate_pre = getattr(self, 'rate_pre_'+self.data_name)
var = 100.*(var - rate_pre[i]) / rate_pre[i]
                #--- rebin using 'dt' as the new bin width
out = adaptar_ii(
nwndw = nwndw,
dT = dT,
n = nbin,
dt = dt,
t = t,
r = var,
fgap = self.fgap
)
                enough = out[0]  # True: data with less than 100*'fgap'% of gaps
Enough[varname] += [ enough ]
                ADAP[nok-1][varname] = out[1]  # out[1] = [time, variable]
if enough:
#import pdb; pdb.set_trace()
IDs[varname] += [i]
nEnough[varname] += 1
#NOTE: `ADAP` points to `self.__ADAP__`
print " ----> len.ADAP: %d" % len(ADAP)
self.__nok__ = nok
self.__nbad__ = nbad
self.out['nok'] = nok
self.out['nbad'] = nbad
self.out['IDs'] = IDs
self.out['nEnough'] = nEnough
self.out['Enough'] = Enough
def lock_IDs(self):
"""
This assumes that 'IDs' has only *one* key.
That is, len(IDs)=1 !!
"""
IDs = self.out['IDs']
varname = IDs.keys()[0]
self.restricted_IDs = IDs[varname]
self.IDs_locked = True
def rebine_final(self):
"""
        rebinning of each event ... FINAL PART
"""
nvars = self.nvars #len(VARS)
VARS = self.VARS
nbin = self.nBin['total']
nwndw = [self.nBin['before'], self.nBin['after']]
day = 86400.
        ## outputs of 'self.rebine()'
ADAP = self.__ADAP__
Enough = self.out['Enough']
nEnough = self.out['nEnough']
IDs = self.out['IDs']
nok = self.out['nok']
nbad = self.out['nbad']
stuff = {} #[]
        # make room for the rebinned data (for possible post-analysis use)
        if self.data_name==self.data_name_:
            self.rebined_data = {}  # create the dictionary only ONCE
for varname in VARS.keys():
if self.verbose:
print ccl.On + " -------> procesando: %s" % VARS[varname]['label']
print " nEnough/nok/(nok+nbad): %d/%d/%d " % (nEnough[varname], nok, nok+nbad) + ccl.W
            VAR_adap = zeros((nok, nbin))  # rebinned profiles (*)
            # (*): one of these per variable
            # loop over the 'nok' events that passed the filter above:
            for i in range(nok):
                VAR_adap[i,:] = ADAP[i][varname][1]  # rebinned values of variable "j" for event "i"
self.rebined_data[varname] = VAR_adap
            # mean values of this variable for each event
avrVAR_adap = mvs_for_each_event(VAR_adap, nbin, nwndw, Enough[varname], self.verbose)
if self.verbose:
print " ---> (%s) avrVAR_adap[]: \n" % varname, avrVAR_adap
VAR_avrg = zeros(nbin)
VAR_avrgNorm = zeros(nbin)
VAR_medi = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
            # loop bin by bin to compute the mean, median, error, etc...
for i in range(nbin):
                cond = ~np.isnan(VAR_adap.T[i,:])  # filter out events that contribute no data in this bin
                ndata[i] = len(find(cond))  # number of data points != nan
                VAR_avrg[i] = np.mean(VAR_adap.T[i,cond])  # average over the non-flagged values
                VAR_avrgNorm[i] = np.mean(VAR_adap.T[i,cond]/avrVAR_adap[cond])
                VAR_medi[i] = np.median(VAR_adap.T[i,cond])  # median of the non-flagged values
                VAR_std[i] = np.std(VAR_adap.T[i,cond])  # std of the same data set
stuff[varname] = [VAR_avrg, VAR_medi, VAR_std, ndata, avrVAR_adap]
            # NOTE: check that 'ADAP[j][varname][0]' is the same for ALL
            # events 'j', and for ALL 'varname'.
self.out['dVARS'] = stuff
self.out['tnorm'] = grab_time_domain(ADAP, check=True)
"""def __getattr__(self, attname):
if attname[:10]=='load_data_':
return self.attname"""
def load_files_and_timeshift_ii(self, _data_handler, obs_check=None):
"""
INPUT
-----
* _data_handler:
class that handles the i/o of the database related to 'data_name'.
* obs_check:
if not None, is a list of strings related to the names of
the observables of our interest. The idea is to make
sure that we are asking for variables that are included
in our database `self.VARS`.
"""
read_flag = 'read_'+self.data_name # e.g. self.read_Auger
if not(read_flag in self.__dict__.keys()): # do i know u?
setattr(self, read_flag, False) #True: if files are already read
#--- read data and mark flag as read!
if not( getattr(self, read_flag) ):
attname = 'load_data_'+self.data_name
dh = _data_handler(
input=self.gral.fnames[self.data_name],
)
# point to the method that selects data from
# a given window
self.grab_window = dh.grab_block # {method}
# grab/point-to data from disk
#NOTE: if self.FILTER['CorrShift']==True, then `self.tb` and
# `self.bd` will be shifted!
out = dh.load(data_name=self.data_name, tb=self.tb, bd=self.bd)
# attribute data pointers to `self`
for nm, value in out.iteritems():
# set `t_utc` and `VAR` to `self`
setattr(self,nm,value)
# check that we are grabbing observables of our
# interest
if obs_check is not None:
for nm in obs_check:
nm_ = nm+'.'+self.data_name
assert nm_ in self.VARS.keys(),\
" %s is not database list: %r"%(nm_, self.VARS.keys())
self.nvars = len(self.VARS.keys())
# mark as read
            self.read_flag = True  # True: the input files have already been read
#--- check weird case
assert self.data_name in self.names_ok,\
_ERROR_+" not on my list!: %s" % self.data_name+\
"\n Must be one of these: %r" % [self.names_ok]
def make_plots(self):
"""
        #---- generate figures and ascii files for the average/median profiles
"""
nBin = self.nBin
fgap = self.fgap
MCwant = self.FILTER['MCwant']
ThetaThres = self.CUTS['ThetaThres']
if self.FILTER['vsw_filter']:
v_lo, v_hi = self.CUTS['v_lo'], self.CUTS['v_hi']
else:
            v_lo, v_hi = 0.0, 0.0  # these values mean there is no filter
if self.FILTER['z_filter_on']:
z_lo, z_hi = self.CUTS['z_lo'], self.CUTS['z_hi']
else:
z_lo, z_hi = 0.0, 0.0
if self.FILTER['B_filter']:
B_lo, B_hi = self.CUTS['B_lo'], self.CUTS['B_hi']
else:
            B_lo, B_hi = 0.0, 0.0  # these values mean there is no filter
if self.FILTER['filter_dR.icme']:
dR_lo, dR_hi = self.CUTS['dR_lo'], self.CUTS['dR_hi']
else:
            dR_lo, dR_hi = 0.0, 0.0  # these values mean there is no filter
        nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']  # [1] number of bins wanted in the average profile
        #-------------------- prefixes:
        # prefix for the Wang filter:
if self.FILTER['wang']:
WangFlag = str(ThetaThres)
else:
WangFlag = 'NaN'
        # general prefix for the figure names:
if self.FILTER['CorrShift']:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
#-------------------------------
        # generic names...
DIR_FIGS = '%s/MCflag%s/%s' % (self.dir_plots, MCwant['alias'], prexShift)
DIR_FIGS += '/' + self._dirs_['suffix']
DIR_ASCII = '%s/MCflag%s/%s' % (self.dir_ascii, MCwant['alias'], prexShift)
DIR_ASCII += '/' + self._dirs_['suffix']
        os.system('mkdir -p %s' % DIR_FIGS)   # create it if it does not exist
        os.system('mkdir -p %s' % DIR_ASCII)  # (bis)
print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W
print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W
FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant['alias'], nBin['before'], nBin['after'], fgap)
FNAMEs += '_Wang%s' % (WangFlag)
if self.FILTER['vsw_filter']: FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (v_lo, v_hi)
if self.FILTER['z_filter_on']: FNAMEs += '_zlo.%2.2f.zhi.%2.2f' % (z_lo, z_hi)
if self.FILTER['B_filter']: FNAMEs += '_Blo.%2.2f.Bhi.%2.2f' % (B_lo, B_hi)
if self.FILTER['filter_dR.icme']: FNAMEs += '_dRlo.%2.2f.dRhi.%2.2f' % (dR_lo, dR_hi)
if not self.FILTER['vsw_filter']:
FNAMEs += '_' # flag for post-processing, indicating
# there was no splitting
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
fname_nro = DIR_ASCII+'/'+'n.events_'+FNAMEs+'.txt'
#'w': write mode #'a': append mode
#---FLAG_001
if self.data_name==self.data_name_:
fnro = open(fname_nro, 'w')
else:
            fnro = open(fname_nro, 'a')  # if another input data set is used, keep appending
                                         # the number of events at the end of 'fname_nro'
#-------------------------------------------------------------------
nvars = len(self.VARS)
for varname in self.VARS.keys():
fname_fig = '%s_%s.png' % (FNAME_FIGS, varname) #self.VARS[i][1])
print ccl.Rn+ " ------> %s" % fname_fig
ylims = self.VARS[varname]['lims'] #self.VARS[i][2]
ylabel = self.VARS[varname]['label'] #self.VARS[i][3]
average = self.out['dVARS'][varname][0]
mediana = self.out['dVARS'][varname][1] #self.out['dVARS'][i][4]
std_err = self.out['dVARS'][varname][2]
            nValues = self.out['dVARS'][varname][3] # number of values contributing to each data bin
N_selec = self.out['nok'] #self.out['dVARS'][varname][0]
N_final = self.out['nEnough'][varname] #nEnough[i]
SUBTITLE = '# of selected events: %d \n\
events w/80%% of data: %d \n\
bins per time unit: %d \n\
MCflag: %s \n\
WangFlag: %s' % (N_selec, N_final, nBin['bins_per_utime'], MCwant['alias'], WangFlag)
makefig(mediana, average, std_err, nValues, self.out['tnorm'],
SUBTITLE, ylims, ylabel, fname_fig)
fdataout = '%s_%s.txt' % (FNAME_ASCII, varname) #self.VARS[i][1])
dataout = np.array([self.out['tnorm'] , mediana, average, std_err, nValues])
print " ------> %s\n" % fdataout + ccl.W
np.savetxt(fdataout, dataout.T, fmt='%12.5f')
            #-------- save the number of selected events for this variable
line = '%s %d %d\n' % (varname, N_final, N_selec)
fnro.write(line)
print ccl.Rn + " --> nro de eventos seleccionados: " + fname_nro + ccl.W
fnro.close()
        #--- outputs (besides the .png files)
self.DIR_ASCII = DIR_ASCII
self.FNAMEs = FNAMEs
def build_params_file(self):
"""
        Builds a file with information on the selected events:
        - mean values of the observables (B, Vsw, Temp, beta, etc.)
        - the event IDs
        - durations of the MCs and the sheaths
"""
DIR_ASCII = self.DIR_ASCII
FNAMEs = self.FNAMEs
#-------------------------------------------- begin: NC_FILE
print "\n*********************************** begin: NC_FILE"
        #------- generate a record of the IDs of the
        #        events that entered the averages.
        #        Note: one record per variable.
fname_out = DIR_ASCII+'/'+'_stuff_'+FNAMEs+'.nc' #'./test.nc'
#---FLAG_001
if self.data_name==self.data_name_:
fout = netcdf_file(fname_out, 'w')
print "\n ----> generando: %s\n" % fname_out
else:
fout = netcdf_file(fname_out, 'a')
            # mode 'a': if another input dataset is used, append the number
            # of events at the end of the file 'fname_out'
            print "\n ----> appending to: %s\n" % fname_out
IDs = self.out['IDs']
for varname in self.VARS.keys():
print " ----> " + varname
n_events = len(IDs[varname])
dimname = 'nevents_'+varname
fout.createDimension(dimname, n_events)
print " n_events: ", n_events
prom = self.out['dVARS'][varname][4]
cc = np.isnan(prom)
print " nprom (all) : ", prom.size
prom = prom[~cc]
print " nprom (w/o nan): ", prom.size
dims = (dimname,)
write_variable(fout, varname, dims, prom, 'd',
'average_values per event')
            #---------- IDs for this variable
ids = map(int, IDs[varname])
vname = 'IDs_'+varname
write_variable(fout, vname, dims, ids, 'i',
'event IDs that enter in this parameter average')
            #---------- duration of the structure
dtsh = np.zeros(len(ids))
dtmc = np.zeros(len(ids))
for i in range(len(ids)):
id = ids[i]
dtsh[i] = self.dt_sh[id]
dtmc[i] = self.dt_mc[id]
vname = 'dt_sheath_'+varname
write_variable(fout, vname, dims, dtsh, 'd', '[days]')
vname = 'dt_mc_'+varname
write_variable(fout, vname, dims, dtmc, 'd', '[days]')
fout.close()
print "**************************************** end: NC_FILE"
#---------------------------------------------- end: NC_FILE
def filter_events(self):
structure = self.structure
tb = self.tb
FILTER = self.FILTER
dTday = self.CUTS['dTday']
day = 86400.
AU_o_km = 1./(150.0e6)
sec_o_day = 86400.
#------------------------------------ EVENTS's PARAMETERS
        #MCsig = array(f_events.variables['MC_sig'].data)# 2,1,0: MC, rotation, irregular
        #Vnsh = array(f_events.variables['wang_Vsh'].data) # normal velocity of the shock
        ThetaSh = np.array(self.f_events.variables['wang_theta_shock'].data) # orientation of the shock normal
        i_V = self.f_events.variables[structure+'_V'].data.copy() # ICME speed
        i_B = self.f_events.variables[structure+'_B'].data.copy() # ICME magnetic field
        i_dt = self.f_events.variables[structure+'_dt'].data.copy() # ICME duration
i_dR = i_dt*(i_V*AU_o_km*sec_o_day)
# values of the observables to use for filtering
vfparam = get_fparam(self.f_events, self.fparam)
#RatePre_Names = []
        #--- set members of 'self' named 'rate_pre_...'
for vname in self.f_events.variables.keys():
if vname.startswith('rate_pre_'):
#RatePre_Names += [ vname ] # save them to make checks later
var = self.f_events.variables[vname].data.copy()
                setattr(self, vname, var) # assign 'rate_pre_...' to 'self'
"""
self.rate_pre = self.f_events.variables['rate_pre_McMurdo'].data.copy()
self.rate_pre_Auger=self.f_events.variables['rate_pre_Auger'].data.copy()
"""
self.Afd = self.f_events.variables['A_FD'].data.copy()
#------------------------------------
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #++++++++++++++++++ begin: EVENT SELECTION ++++++++++++++++++++++++++
#------- filter dates
BETW1998_2006 = np.ones(tb.n_icmes, dtype=bool)
if FILTER['choose_1998-2006']:
_until_jan98 = range(0, 26) # all events up to Jan/98
_after_dec06 = range(307, tb.n_icmes) # all after Dec/2006
for i in (_until_jan98 + _after_dec06):
BETW1998_2006[i] = False # 'False' to exclude events
        #------- select MCs with a catalog label (lepping=2, etc.)
MC_FLAG = np.ones(tb.n_icmes, dtype=bool)
for i in range(tb.n_icmes):
MC_FLAG[i] = tb.MCsig[i] in FILTER['MCwant']['flags']
        #------- exclude events with 2 MCs
        EVENTS_with_2MCs= (26, 148, 259, 295)
        MCmultiple = FILTER['Mcmultiple'] # True to include multi-MC events
        MCmulti = np.ones(tb.n_icmes, dtype=bool) # will be set False for multi-MC events
        if not FILTER['Mcmultiple']:
            for i in EVENTS_with_2MCs:
                MCmulti[i] &= False
        #------- shock orientation (Wang catalog)
if FILTER['wang']:
ThetaThres = self.CUTS['ThetaThres']
ThetaCond = thetacond(ThetaThres, ThetaSh) # set lower threshold
#------- duration of sheaths
self.dt_mc = diff_dates(tb.tend_mc, tb.tini_mc)/day # [day]
self.dt_sh = diff_dates(tb.tini_mc, tb.tshck)/day # [day]
dt = diff_dates(self.bd.tend, self.bd.tini)/day
DURATION = dt > dTday # sheaths>0
#------- speed of icmes
if FILTER['vsw_filter']:
v_lo = self.CUTS['v_lo']
v_hi = self.CUTS['v_hi']
SpeedCond = (vfparam>=v_lo) & (vfparam<v_hi)
#------- z expansion (a. gulisano)
z_exp = self.z_exp
if FILTER['z_filter_on']:
z_lo = self.CUTS['z_lo']
z_hi = self.CUTS['z_hi']
z_cond = (z_exp>=z_lo) & (z_exp<z_hi)
#------- <B> of icmes
if FILTER['B_filter']:
B_lo = self.CUTS['B_lo']
B_hi = self.CUTS['B_hi']
BfieldCond = (i_B>=B_lo) & (i_B<B_hi)
#------- size of icmes
if FILTER['filter_dR.icme']:
dR_lo = self.CUTS['dR_lo']
dR_hi = self.CUTS['dR_hi']
"""print " ---> i_dR: \n", i_dR
print " ---> i_dt: \n", i_dt
raw_input()"""
dRicmeCond = (i_dR>=dR_lo) & (i_dR<dR_hi)
#------- filtro total
SELECC = np.ones(tb.n_icmes, dtype=bool)
        SELECC &= BETW1998_2006 # stay within this period of years
        SELECC &= MCmulti # multiple clouds
        SELECC &= MC_FLAG # cloud catalog
        SELECC &= DURATION # we don't want sheaths lasting ~1hr since they only add noise
        if FILTER['wang']: SELECC &= ThetaCond # close to 180 deg is the nose of the shock
if FILTER['vsw_filter']: SELECC &= SpeedCond
if FILTER['z_filter_on']: SELECC &= z_cond
if FILTER['B_filter']: SELECC &= BfieldCond
if FILTER['filter_dR.icme']: SELECC &= dRicmeCond
self.SELECC = SELECC
self.n_SELECC = len(find(SELECC))
#self.aux['SELECC'] = self.SELECC
        #+++++++++++++++++ end: EVENT SELECTION ++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if self.n_SELECC<=0:
print ccl.Rn + "\n --------> FATAL ERROR!!!: self.n_SELECC=<0"
print " exiting....... \n" + ccl.W
raise SystemExit
def get_fparam(finp, fparam='mc_V'):
"""
    more acceptable fparam values can be implemented here; for
    instance, values that imply an operation over several keys of
    finp.variables.keys().
"""
# keys of the usual .nc file
_keys_of_netcdf_file = ['sh.mc_V', 'mc_V', 'sh.mc_B', 'mc_B']
_keys_of_netcdf_file += ['sh.i_V', 'i_V', 'sh.i_B', 'i_B']
# check if it's a valid `fparam` && extract
if fparam in _keys_of_netcdf_file:
values = finp.variables[fparam].data.copy()
else:
raise SystemExit('\n [-] Unrecognized fparam value: '+fparam+'\n')
return values
class RichTable(object):
def __init__(s, fname_rich):
s.fname_rich = fname_rich
s.tshck = []
s.tini_icme, s.tend_icme = [], []
s.tini_mc, s.tend_mc = [], []
s.Qicme = []
s.MCsig = []
s.Dst = []
def read(s):
print "\n ---> reading Richardson's table: %s" % s.fname_rich
frich = open(s.fname_rich, 'r')
print " file read."
ll, n = [], 0
for line in frich:
ll += [line.split(',')]
n +=1
print " lineas leidas: %d" % n
for i in range(1,n):
            #------ shock date
s.tshck += [datetime.strptime(ll[i][1][1:20],"%Y-%m-%d %H:%M:%S")]
            #------ ICME start date
            ss = ll[i][2][1:11].split() # string with the ICME start date
HH = int(ss[1][0:2])
MM = int(ss[1][2:4])
mm = int(ss[0].split('/')[0])
dd = int(ss[0].split('/')[1])
if mm==s.tshck[i-1].month:
yyyy = s.tshck[i-1].year
else:
yyyy = s.tshck[i-1].year + 1
s.tini_icme += [datetime(yyyy, mm, dd, HH, MM)]
            #------ ICME end date
ss = ll[i][3][1:11].split()
HH = int(ss[1][0:2])
MM = int(ss[1][2:4])
mm = int(ss[0].split('/')[0])
dd = int(ss[0].split('/')[1])
            if mm==s.tshck[i-1].month:
                yyyy = s.tshck[i-1].year
            elif s.tshck[i-1].month==12:
                yyyy = s.tshck[i-1].year + 1
            else:
                yyyy = s.tshck[i-1].year
s.tend_icme += [datetime(yyyy, mm, dd, HH, MM)]
            #------ MC dates
if ll[i][6]=='':
s.tini_mc += [nan]
s.tend_mc += [nan]
else:
                hrs_ini = int(ll[i][6]) # col6 is the MC start
                dummy = ll[i][7].split('(') # col7 is the MC end
ndummy = len(dummy)
if ndummy==1:
hrs_end = int(ll[i][7])
else:
hrs_end = int(ll[i][7].split('(')[0][1:])
s.tini_mc += [ s.tini_icme[i-1] + timedelta(hours=hrs_ini) ]
s.tend_mc += [ s.tend_icme[i-1] + timedelta(hours=hrs_end) ]
            # quality of the ICME boundaries
s.Qicme += [ ll[i][10] ] # quality of ICME boundaries
            # MC flag
s.MCsig += [ ll[i][15] ]
#if ll[i][15]=='2H':
# MCsig += [ 2 ]
#else:
# MCsig += [ int(ll[i][15]) ] # MC flag
#
s.Dst += [ int(ll[i][16]) ] # Dst
#--------------------------------------
s.MCsig = np.array(s.MCsig)
s.Dst = np.array(s.Dst)
s.n_icmes = len(s.tshck)
#
"""
col0 : id
col1 : disturbance time
col2 : ICME start
col3 : ICME end
col4 : Composition start
col5 : Composition end
col6 : MC start
col7 : MC end
col8 : BDE
col9 : BIF
col10: Quality of ICME boundaries (1=best)
    col11: dV --> 'S' indicates that a shock is included
col12: V_ICME
col13: V_max
col14: B
col15: MC flag --> '0', '1', '2', '2H': irregular, B-rotation, MC, or MC of "Huttunen etal05" respectively.
col16: Dst
col17: V_transit
col18: LASCO_CME --> time of associated event, generally the CME observed by SOHO/LASCO.
               Sometimes has an 'H' for Halo.
"""
def Add2Date(date, days, hrs=0, BadFlag=np.nan):
"""
Mapping to add `days` and `hrs` to a given
`datetime` object.
NOTE: `days` can be fractional.
"""
if type(date) is not datetime:
return BadFlag
return date + timedelta(days=days, hours=hrs)
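# Illustrative doctest-style sketch for Add2Date (values follow from the
# implementation above): fractional days are handled by timedelta, and
# non-datetime inputs fall back to `BadFlag`.
# >>> Add2Date(datetime(2000, 1, 1), days=1.5)
# datetime.datetime(2000, 1, 2, 12, 0)
# >>> Add2Date('not-a-date', days=1.5) # returns np.nan, the default BadFlag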
def utc2date(t):
date_utc = datetime(1970, 1, 1, 0, 0, 0, 0)
date = date_utc + timedelta(days=(t/86400.))
return date
def date2utc(date):
date_utc = datetime(1970, 1, 1, 0, 0, 0, 0)
utcsec = (date - date_utc).total_seconds() # [utc sec]
return utcsec
def ACEepoch2utc(AceEpoch):
return AceEpoch + 820454400.0
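# Consistency sketch for the three time helpers above: date2utc/utc2date are
# inverses, and the ACE-epoch offset is just 1996-01-01 expressed in UTC seconds.
# >>> utc2date(date2utc(datetime(1996, 1, 1)))
# datetime.datetime(1996, 1, 1, 0, 0)
# >>> date2utc(datetime(1996, 1, 1)) == ACEepoch2utc(0.0) # both 820454400.0
# True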
class arg_to_datetime(argparse.Action):
"""
argparse-action to handle command-line arguments of
the form "dd/mm/yyyy" (string type), and converts
it to datetime object.
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(arg_to_datetime, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
#print '%r %r %r' % (namespace, values, option_string)
dd,mm,yyyy = map(int, values.split('/'))
value = datetime(yyyy,mm,dd)
setattr(namespace, self.dest, value)
class arg_to_utcsec(argparse.Action):
"""
argparse-action to handle command-line arguments of
the form "dd/mm/yyyy" (string type), and converts
it to UTC-seconds.
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(arg_to_utcsec, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
#print '%r %r %r' % (namespace, values, option_string)
dd,mm,yyyy = map(int, values.split('/'))
value = (datetime(yyyy,mm,dd)-datetime(1970,1,1)).total_seconds()
setattr(namespace, self.dest, value)
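# Minimal usage sketch for the two argparse actions above (the flag name
# '--time_ini' is illustrative only):
# >>> import argparse
# >>> parser = argparse.ArgumentParser()
# >>> _ = parser.add_argument('-i', '--time_ini', action=arg_to_datetime)
# >>> parser.parse_args(['-i', '24/12/2015']).time_ini
# datetime.datetime(2015, 12, 24, 0, 0)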
class My2DArray(object):
"""
wrapper around numpy array with:
- flexible number of rows
- records the maximum nrow requested
NOTE:
    This was tested for 1D and 2D arrays.
"""
def __init__(self, shape, dtype=np.float32):
self.this = np.empty(shape, dtype=dtype)
setattr(self, '__array__', self.this.__array__)
def resize_rows(self, nx_new=None):
""" Increment TWICE the size of axis=0, **without**
losing data.
"""
sh_new = np.copy(self.this.shape)
nx = self.this.shape[0]
if nx_new is None:
sh_new[0] = 2*sh_new[0]
elif nx_new<=nx:
return 0 # nothing to do
else:
sh_new[0] = nx_new
tmp = self.this.copy()
#print "----> tmp: ", tmp.shape
new = np.zeros(sh_new)
new[:nx] = tmp
self.this = new
"""
        for some reason (probably due to the numpy
        implementation), if we don't do this, then:
        >>> print self.__array__()
        stays truncated at the original size that was
        set at __init__() time.
So we need to tell numpy our new resized shape!
"""
setattr(self, '__array__', self.this.__array__)
def __get__(self, instance, owner):
return self.this
def __getitem__(self, i):
return self.this[i]
def __setitem__(self, i, value):
"""
We can safely use:
>>> ma[n:n+m,:] = [...]
        even if n+m exceeds our current size in axis=0.
"""
stop = i
if type(i)==slice:
stop = i.stop
elif type(i)==tuple:
if type(i[0])==slice:
"""
in case:
ma[n:n+m,:] = ...
"""
stop = i[0].stop
else:
stop = i[0]
#--- if requested row exceeds limits, duplicate
# our size in axis=0
if stop>=self.this.shape[0]:
nx_new = self.this.shape[0]
while nx_new<=stop:
nx_new *= 2
self.resize_rows(nx_new)
self.this[i] = value
#--- register the maximum nrow requested.
# NOTE here we are referring to size, and *not* row-index.
self.max_nrow_used = stop+1 # (row-size, not row-index)
def __getattr__(self, attnm):
return getattr(self.this, attnm)
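# Usage sketch for My2DArray (the numbers follow from the implementation
# above): writing past the end doubles axis=0 until the row fits, and
# `max_nrow_used` records the largest row-size requested so far.
# >>> ma = My2DArray((4, 3))
# >>> ma[10, :] = 1.0 # axis=0 grows 4 -> 8 -> 16
# >>> ma.shape
# (16, 3)
# >>> ma.max_nrow_used
# 11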
def ACEepoch2date(ace_epoch):
"""
ace_epoch: seconds since 1/1/96
"""
date = datetime(1996,1,1) + timedelta(seconds=ace_epoch)
return date
def date2ACEepoch(date):
ace_o = datetime(1996,1,1)
return (date - ace_o).total_seconds()
#+++++++++++++++++++++++++++++++++
if __name__=='__main__':
print " ---> this is a library!\n"
#EOF
|
jimsrc/seatos
|
shared_lib/shared_funcs.py
|
Python
|
mit
| 54,269
|
[
"NetCDF"
] |
86e9525adb53af7cfb0a72cdf8965c6b3e60fa83fca77ffb3e90918cf2e0d089
|
from tools.load import LoadMatrix
from sg import sg
lm=LoadMatrix()
traindat=lm.load_numbers('../data/fm_train_real.dat')
testdat=lm.load_numbers('../data/fm_test_real.dat')
parameter_list=[[traindat,testdat,10,2.1,10.,1e-5,False],
[traindat,testdat,10,2.1,11.,1e-4,False]]
def classifier_libsvm_oneclass (fm_train_real=traindat,fm_test_real=testdat,
size_cache=10, width=2.1,C=10.,
epsilon=1e-5,use_bias=False):
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('new_classifier', 'LIBSVM_ONECLASS')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
kernel_matrix = sg('get_kernel_matrix', 'TEST')
return result, kernel_matrix
if __name__=='__main__':
print('LibSVMOneClass')
classifier_libsvm_oneclass(*parameter_list[0])
|
chenmoshushi/shogun
|
examples/undocumented/python_static/classifier_libsvmoneclass.py
|
Python
|
gpl-3.0
| 906
|
[
"Gaussian"
] |
27ef3f3eac350a1975de9c0d1e9ba421456dff477e09f4b0cd95f412a35ca98f
|
"""
Unit tests for masquerade.
"""
import json
import pickle
from mock import patch
from nose.plugins.attrib import attr
from datetime import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.timezone import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import (
CourseMasquerade,
MasqueradingKeyValueStore,
handle_ajax,
setup_masquerade,
get_masquerading_group_info
)
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Base class for masquerade tests that sets up a test course and enrolls a user in the course.
"""
@classmethod
def setUpClass(cls):
super(MasqueradeTestCase, cls).setUpClass()
cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC())})
cls.info_page = ItemFactory.create(
category="course_info", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name="Test Section",
)
cls.sequential_display_name = "Test Masquerade Subsection"
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category="sequential",
display_name=cls.sequential_display_name,
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category="vertical",
display_name="Test Unit",
)
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
cls.problem_display_name = "TestMasqueradeProblem"
cls.problem = ItemFactory.create(
parent_location=cls.vertical.location,
category='problem',
data=problem_xml,
display_name=cls.problem_display_name
)
def setUp(self):
super(MasqueradeTestCase, self).setUp()
self.test_user = self.create_user()
self.login(self.test_user.email, 'test')
self.enroll(self.course, True)
def get_courseware_page(self):
"""
Returns the server response for the courseware page.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.sequential.location.name,
}
)
return self.client.get(url)
def get_course_info_page(self):
"""
Returns the server response for course info page.
"""
url = reverse(
'info',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def _create_mock_json_request(self, user, body, method='POST', session=None):
"""
Returns a mock JSON request for the specified user
"""
request = get_request_for_user(user)
request.method = method
request.META = {'CONTENT_TYPE': ['application/json']}
request.body = body
request.session = session or {}
return request
def verify_staff_debug_present(self, staff_debug_expected):
"""
Verifies that the staff debug control visibility is as expected (for staff only).
"""
content = self.get_courseware_page().content
self.assertIn(self.sequential_display_name, content, "Subsection should be visible")
self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
def get_problem(self):
"""
Returns the JSON content for the problem in the course.
"""
problem_url = reverse(
'xblock_handler',
kwargs={
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
'handler': 'xmodule_handler',
'suffix': 'problem_get'
}
)
return self.client.get(problem_url)
def verify_show_answer_present(self, show_answer_expected):
"""
Verifies that "Show Answer" is only present when expected (for staff only).
"""
problem_html = json.loads(self.get_problem().content)['html']
self.assertIn(self.problem_display_name, problem_html)
self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
def verify_real_user_profile_link(self):
"""
Verifies that the 'Profile' link in the navigation dropdown is pointing
to the real user.
"""
content = self.get_courseware_page().content
self.assertIn(
'<a href="/u/{}" class="action dropdown-menuitem">Profile</a>'.format(self.test_user.username),
content,
"Profile link should point to real user",
)
@attr(shard=1)
class NormalStudentVisibilityTest(MasqueradeTestCase):
"""
Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
"""
def create_user(self):
"""
Creates a normal student user.
"""
return UserFactory()
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_not_visible(self):
"""
Tests that staff debug control is not present for a student.
"""
self.verify_staff_debug_present(False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_not_visible(self):
"""
Tests that "Show Answer" is not visible for a student.
"""
self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
"""
Base class for tests of the masquerade behavior for a staff member.
"""
def create_user(self):
"""
Creates a staff user.
"""
return StaffFactory(course_key=self.course.id)
def update_masquerade(self, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': unicode(self.course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
"application/json"
)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as student.
"""
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_with_masquerade(self):
"""
Tests that staff debug control is not visible when masquerading as a student.
"""
# Verify staff initially can see staff debug
self.verify_staff_debug_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_staff_debug_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_staff_debug_present(True)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_for_staff(self):
"""
Tests that "Show Answer" is not visible when masquerading as a student.
"""
# Verify that staff initially can see "Show Answer".
self.verify_show_answer_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_show_answer_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_show_answer_present(True)
@attr(shard=1)
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
"""
Check for staff being able to masquerade as a specific student.
"""
def setUp(self):
super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
self.student_user = self.create_user()
self.login_student()
self.enroll(self.course, True)
def login_staff(self):
""" Login as a staff user """
self.logout()
self.login(self.test_user.email, 'test')
def login_student(self):
""" Login as a student """
self.logout()
self.login(self.student_user.email, 'test')
def submit_answer(self, response1, response2):
"""
Submit an answer to the single problem in our test course.
"""
return self.submit_question_answer(
self.problem_display_name,
{'2_1': response1, '2_2': response2}
)
def get_progress_detail(self):
"""
Return the reported progress detail for the problem in our test course.
The return value is a string like u'1/2'.
"""
return json.loads(self.look_at_question(self.problem_display_name).content)['progress_detail']
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_user_on_self_paced(self):
"""
        Test masquerading as a specific user on the course info page when the self-paced
        configuration flag "enable_course_home_improvements" is set.
        Log in as a staff user and visit the course info page, then
        set masquerade to view the same page as a specific student and revisit the course info page.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
        # Masquerade as the student, enable the self-paced configuration, and check we can see the info page.
SelfPacedConfiguration(enable_course_home_improvements=True).save()
self.update_masquerade(role='student', user_name=self.student_user.username)
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student(self):
"""
Test masquerading as a specific user.
        We answer the problem in our test course as the student and as the staff user, and we use the
        progress as a proxy to determine whose state we currently see.
"""
# Answer correctly as the student, and check progress.
self.login_student()
self.submit_answer('Correct', 'Correct')
self.assertEqual(self.get_progress_detail(), u'2/2')
# Log in as staff, and check the problem is unanswered.
self.login_staff()
self.assertEqual(self.get_progress_detail(), u'0/2')
# Masquerade as the student, and check we can see the student state.
self.update_masquerade(role='student', user_name=self.student_user.username)
self.assertEqual(self.get_progress_detail(), u'2/2')
# Verify that the user dropdown links have not changed
self.verify_real_user_profile_link()
# Temporarily override the student state.
self.submit_answer('Correct', 'Incorrect')
self.assertEqual(self.get_progress_detail(), u'1/2')
# Reload the page and check we see the student state again.
self.get_courseware_page()
self.assertEqual(self.get_progress_detail(), u'2/2')
# Become the staff user again, and check the problem is still unanswered.
self.update_masquerade(role='staff')
self.assertEqual(self.get_progress_detail(), u'0/2')
# Verify the student state did not change.
self.login_student()
self.assertEqual(self.get_progress_detail(), u'2/2')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student_course_info(self):
"""
        Test masquerading as a specific user on the course info page.
        We log in with login_staff and check that the course info page content is correct, then we
        set masquerade to view the same page as a specific student and verify it again.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student, and check we can see the info page.
self.update_masquerade(role='student', user_name=self.student_user.username)
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
@attr(shard=1)
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestGetMasqueradingGroupId, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.test_user.id)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade(self):
"""
Tests that a staff member can masquerade as being in a particular group.
"""
# Verify that there is no masquerading group initially
group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
self.assertIsNone(group_id)
self.assertIsNone(user_partition_id)
# Install a masquerading group
request = self._create_mock_json_request(
self.test_user,
body='{"role": "student", "user_partition_id": 0, "group_id": 1}'
)
handle_ajax(request, unicode(self.course.id))
setup_masquerade(request, self.test_user, True)
# Verify that the masquerading group is returned
group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
self.assertEqual(group_id, 1)
self.assertEqual(user_partition_id, 0)
class ReadOnlyKeyValueStore(DictKeyValueStore):
"""
A KeyValueStore that raises an exception on attempts to modify it.
Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
"""
def set(self, key, value):
assert False, "ReadOnlyKeyValueStore may not be modified."
def delete(self, key):
assert False, "ReadOnlyKeyValueStore may not be modified."
def set_many(self, update_dict): # pylint: disable=unused-argument
assert False, "ReadOnlyKeyValueStore may not be modified."
class FakeSession(dict):
""" Mock for Django session object. """
modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
"""
Unit tests for the MasqueradingKeyValueStore class.
"""
def setUp(self):
super(MasqueradingKeyValueStoreTest, self).setUp()
self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
self.session = FakeSession()
self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
def test_all(self):
self.assertEqual(self.kvs.get('a'), 42)
self.assertEqual(self.kvs.get('b'), None)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
with self.assertRaises(KeyError):
self.kvs.get('d')
self.assertTrue(self.kvs.has('a'))
self.assertTrue(self.kvs.has('b'))
self.assertTrue(self.kvs.has('c'))
self.assertFalse(self.kvs.has('d'))
self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
self.kvs.set('b', 7)
self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
self.assertEqual(self.kvs.get('b'), 7)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
self.assertEqual(self.kvs.get('d'), 'Giraffe')
for key in 'abd':
self.assertTrue(self.kvs.has(key))
self.kvs.delete(key)
with self.assertRaises(KeyError):
self.kvs.get(key)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
"""
Unit tests for the CourseMasquerade class.
"""
def test_unpickling_sets_all_attributes(self):
"""
Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
the session.
"""
cmasq = CourseMasquerade(7)
del cmasq.user_name
pickled_cmasq = pickle.dumps(cmasq)
unpickled_cmasq = pickle.loads(pickled_cmasq)
self.assertEqual(unpickled_cmasq.user_name, None)
|
tanmaykm/edx-platform
|
lms/djangoapps/courseware/tests/test_masquerade.py
|
Python
|
agpl-3.0
| 18,200
|
[
"VisIt"
] |
4600a4c7fa6f37233547a1f7d73d5dccdd882bcd5a517b71166fbb8430b6adbf
|
"""
DIRAC.StorageManagementSystem.Agent package
"""
|
DIRACGrid/DIRAC
|
src/DIRAC/StorageManagementSystem/Agent/__init__.py
|
Python
|
gpl-3.0
| 55
|
[
"DIRAC"
] |
324247baf53d9564fe2edbeceefb5019f883142421432aff0d155b60d82b3a35
|
import logging
import subprocess
import threading
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class AsyncRun(threading.Thread):
"""
    Class used to run an asynchronous shell command
    .. notes:: Impossible to get the exit code of the command
"""
def __init__(self, cmd):
self.stdout = None
self.stderr = None
self.cmd = cmd
threading.Thread.__init__(self)
def run(self):
p = subprocess.Popen(self.cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.stdout, self.stderr = p.communicate()
class Run(NeuronModule):
"""
    Run a shell command in synchronous mode
"""
def __init__(self, **kwargs):
super(Run, self).__init__(**kwargs)
# get the command
self.cmd = kwargs.get('application', None)
        # get whether the user selected a blocking command or not
self.async = kwargs.get('async', False)
self.query = kwargs.get('query', None)
if self.query is not None:
self.cmd = self.cmd + "\"" + self.query +"\""
# check parameters
if self._is_parameters_ok():
# run the command
self.cmd = "%s&" % (self.cmd.lower())
print self.cmd
if not self.async:
p = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
self.output = output
self.returncode = p.returncode
message = {
"output": self.output,
"returncode": self.returncode
}
self.say(message)
else:
async_shell = AsyncRun(cmd=self.cmd)
async_shell.start()
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.cmd is None:
            raise MissingParameterException("application parameter required")
return True
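# Illustrative synapse configuration for this neuron, assuming the standard
# kalliope YAML brain format (the synapse name, order text, and application
# below are made-up examples, not part of this module):
#
# - name: "launch-app"
#   signals:
#     - order: "open {{ query }}"
#   neurons:
#     - run:
#         application: "firefox "
#         query: "{{ query }}"
#         async: True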
|
mrdev023/KALLIOPE_FRENCH_PERSONALISATION
|
resources/neurons/run/run.py
|
Python
|
gpl-3.0
| 2,354
|
[
"NEURON"
] |
f9f0c0a87a2ad1245cf30f220a698c8ed8445a7e58ad1d1c29dad968b566aa25
|
from PyQt5.QtWidgets import (QDialog, QRadioButton, QVBoxLayout, QLabel,
QWidget, QDialogButtonBox)
from inselect.lib.utils import debug_print
from inselect.gui.utils import HTML_LINK_TEMPLATE, HorizontalLine
from .barcode_settings import (current_settings, update_settings,
inlite_available, libdmtx_available,
zbar_available)
class BarcodeDialog(QDialog):
STYLESHEET = """
QWidget {
margin-left: 30px;
}
"""
def __init__(self, parent=None):
super(BarcodeDialog, self).__init__(parent)
settings = current_settings()
self._layout = QVBoxLayout()
prompt = QLabel(
'The "Read barcodes" command will set each box\'s "Catalog number" '
'metadata field with value(s) of any barcodes.\n'
'\n'
'Use the controls below to indicate how barcodes should be read. '
'Some options might be unavailable.')
prompt.setWordWrap(True)
self._layout.addWidget(prompt)
self._layout.addWidget(HorizontalLine())
self._radio_libdmtx = self._create_libdmtx(settings)
self._radio_zbar = self._create_zbar(settings)
(self._radio_inlite, self._inlite_1d, self._inlite_datamatrix,
self._inlite_pdf417, self._inlite_qr) = self._create_inlite(settings)
self._buttons = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self._buttons.accepted.connect(self.accept)
self._buttons.rejected.connect(self.reject)
self._layout.addWidget(self._buttons)
self.setLayout(self._layout)
self.setWindowTitle('Read barcodes')
def _create_zbar(self, settings):
radio = QRadioButton(
'My objects are labelled with either 1D barcodes or QR codes')
radio.setChecked('zbar' == settings['engine'])
radio.setEnabled(zbar_available())
self._layout.addWidget(radio)
prompt = QLabel(HTML_LINK_TEMPLATE.format(
'Barcodes will be decoded using the open-source '
'<a href="http://zbar.sourceforge.net/">ZBar</a> library'
))
prompt.setOpenExternalLinks(True)
prompt.setStyleSheet(self.STYLESHEET)
self._layout.addWidget(prompt)
self._layout.addWidget(HorizontalLine())
return radio
def _create_libdmtx(self, settings):
radio = QRadioButton('My objects are labelled with Data Matrix barcodes')
radio.setChecked('libdmtx' == settings['engine'])
radio.setEnabled(libdmtx_available())
self._layout.addWidget(radio)
prompt = QLabel(HTML_LINK_TEMPLATE.format(
'Barcodes will be decoded using the open-source '
'<a href="http://libdmtx.sourceforge.net/">libdmtx</a> library'
))
prompt.setOpenExternalLinks(True)
prompt.setStyleSheet(self.STYLESHEET)
self._layout.addWidget(prompt)
self._layout.addWidget(HorizontalLine())
return radio
def _create_inlite(self, settings):
radio = QRadioButton(
'Either my objects are labelled with a barcode not listed above '
'or I would like the performance and reliability of a commercial '
'library')
radio.setChecked('inlite' == settings['engine'])
radio.setEnabled(inlite_available())
self._layout.addWidget(radio)
prompt = QLabel(HTML_LINK_TEMPLATE.format(
'Only available on Windows. '
'Visit <a href="http://www.inliteresearch.com/">Inlite Research</a> '
'to download and install Inlite Research\'s ClearImage library.'
))
prompt.setWordWrap(True)
prompt.setOpenExternalLinks(True)
prompt.setStyleSheet(self.STYLESHEET)
self._layout.addWidget(prompt)
prompt = QLabel('My objects are labelled with:')
format = settings['inlite-format']
radio_1d = QRadioButton('1D barcodes')
radio_1d.setChecked('1d' == format)
radio_datamatrix = QRadioButton('Data Matrix barcodes')
radio_datamatrix.setChecked('datamatrix' == format)
radio_pdf417 = QRadioButton('PDF 417 barcodes')
radio_pdf417.setChecked('pdf417' == format)
radio_qr = QRadioButton('QR codes')
radio_qr.setChecked('qrcode' == format)
layout = QVBoxLayout()
layout.addWidget(prompt)
layout.addWidget(radio_1d)
layout.addWidget(radio_datamatrix)
layout.addWidget(radio_pdf417)
layout.addWidget(radio_qr)
group = QWidget()
group.setLayout(layout)
group.setStyleSheet(self.STYLESHEET)
radio.toggled.connect(group.setEnabled)
group.setEnabled(inlite_available() and 'inlite' == settings['engine'])
self._layout.addWidget(group)
return radio, radio_1d, radio_datamatrix, radio_pdf417, radio_qr
def done(self, r):
"""QDialog virtual
"""
debug_print('BarcodeDialog.done', r)
# Necessary to avoid core dump on process exit
self._buttons.accepted.disconnect()
self._buttons.rejected.disconnect()
self._radio_inlite.toggled.disconnect()
super(BarcodeDialog, self).done(r)
def accept(self):
"""QDialog virtual
"""
debug_print('BarcodeDialog.accept')
super(BarcodeDialog, self).accept()
if self._radio_zbar.isChecked():
engine = 'zbar'
elif self._radio_libdmtx.isChecked():
engine = 'libdmtx'
else:
engine = 'inlite'
if self._inlite_1d.isChecked():
format = '1d'
elif self._inlite_datamatrix.isChecked():
format = 'datamatrix'
elif self._inlite_pdf417.isChecked():
format = 'pdf417'
else:
format = 'qrcode'
update_settings({'engine': engine, 'inlite-format': format})
|
NaturalHistoryMuseum/inselect
|
inselect/gui/plugins/barcode_dialog.py
|
Python
|
bsd-3-clause
| 6,021
|
[
"VisIt"
] |
49b8fb7e8a95031cb3998bcdf1b42b54577bc0e8fa331f1c93c41c11f9cb79ea
|
import cPickle as pickle
import pandas as pd
from trees import Tree
import csv, re
from matplotlib_venn import venn2
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import seaborn as sb
from collections import defaultdict
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
ppath = "../../proteomics-collection/"
proteomics = pd.DataFrame.from_csv(ppath+"meta_abundance[copies_fL].csv")
pFBA = pd.DataFrame.from_csv("../data/flux[mmol_gCDW_h]_projected.csv")
pFVA = pd.DataFrame.from_csv("../data/flux_variability_[mmol_gCDW_h].csv", header=[0,1]).T
protein_info = pd.read_csv('../data/protein_abundance_info.csv', sep='\t')
gc = pd.DataFrame.from_csv("../data/carbon_sources.csv")
#gc = gc[gc.reference=='Schmidt et al. 2015']
gr = gc['growth rate [h-1]'][gc.index]
fL_cell = gc['single cell volume [fL]'] /2 # fL (cell volumes are overestimated by a factor of 1.7)
fg_cell_old = pd.read_csv('../data/protein_abundance_[fg_cell].csv')
copies_cell_persist = pd.read_csv('../data/protein_abundance_persistors[copies_cell].csv')
model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(model)
rxns = {r.id:r for r in model.reactions}
def map_proteomics(df):
uni_to_b = {row[48:54]:row[0:5].split(';')[0].strip()
for row in open("../data/all_ecoli_genes.txt", 'r')}
df.replace(to_replace={'UPID':uni_to_b}, inplace=True)
    manual_replacements = {
'D0EX67':'b1107',
'D4HZR9':'b2755',
'P00452-2':'b2234',
'P02919-2':'b0149',
'Q2A0K9':'b2011',
'Q5H772':'b1302',
'Q5H776':'b1298',
'Q5H777':'b1297',
'Q6E0U3':'b3183'}
    df.replace(to_replace={'upid':manual_replacements}, inplace=True)
df.set_index('upid', inplace=True)
df.index.name = 'bnumber'
not_identified = ['B8LFD5','D8FH86','D9IX93','E1MTY0','P0CE60','P23477']
df.drop(not_identified, axis=0, inplace=True)
df.sort_index(inplace=True)
def genes_by_function(name):
tree = Tree.FromTMS(open('../data/KO_gene_hierarchy_general.tms', 'r'), 4)
f_KEGG = tree.GetNode(name).children
reader = csv.reader(open('../data/eco_mapping.csv', 'r'), delimiter='\t')
b_to_KEGG = {row[0]:row[2] for row in reader}
return {b for b,ko in b_to_KEGG.iteritems() if ko in f_KEGG}
def convert_copies_fL_to_mmol_gCDW(copies_fL):
rho = 1100 # average cell density gr/liter
DW_fraction = 0.3 # fraction of DW of cells
    Avogadro = 6.02214129 # Avogadro's number without the 1e23 exponent
mmol_L = copies_fL / (Avogadro*1e5)
mmol_gCDW = mmol_L / (rho * DW_fraction)
return mmol_gCDW
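# Worked unit check for the conversion above (illustrative): copies/fL
# * 1e15 fL/L / 6.022e23 copies/mol * 1e3 mmol/mol collapses to dividing by
# (Avogadro * 1e5); dividing by rho*DW_fraction (grams CDW per liter of cell
# volume) then gives mmol per gCDW:
# >>> convert_copies_fL_to_mmol_gCDW(6.02214129e5) # 1.0 mmol/L over 330 gCDW/L
# 0.0030303030303030303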
def convert_mmol_gCDW_to_mg_gCDW(mmol_gCDW):
protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
protein_g_mol = protein_info['molecular_weight[Da]']
mg_gCDW = mmol_gCDW.mul(protein_g_mol,axis=0)
mg_gCDW.replace(np.nan, 0, inplace=True)
return mg_gCDW
def get_complex_molecular_weight(model):
complexes = pd.DataFrame.from_csv('../data/enzyme_complexes.csv')
comp = list(complexes['Gene composition'].values)
comp = [dict(zip(re.findall(r"b[0-9]+", s),re.findall(r"\(([0-9]+)\)", s))) for s in comp]
protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
protein_g_mol = protein_info['molecular_weight[Da]']
all_genes = defaultdict(list)
for s in comp:
for k,v in s.iteritems():
all_genes[k].append(float(v))
for bnumber in protein_g_mol.index:
if bnumber not in all_genes.keys():
all_genes[bnumber].append(1.0)
subunit_comp = {k:np.mean(v) for k,v in all_genes.iteritems()}
r_to_weights = {}
for r in model.reactions:
isozymes = r.gene_reaction_rule.split('or')
isozymes = [re.findall(r"b[0-9]+", iso) for iso in isozymes]
weights = [sum([subunit_comp[b]*protein_g_mol[b] if b in protein_g_mol.index else np.nan
for b in iso]) for iso in isozymes]
r_to_weights[r.id] = np.mean(weights)
return r_to_weights
def convert_copies_fL_to_mg_gCDW(E):
tmp = convert_copies_fL_to_mmol_gCDW(E)
return convert_mmol_gCDW_to_mg_gCDW(tmp)
def get_umol_gCDW_min_from_pFVA(pFVA):
conds = pFVA.index.levels[0]
x = pFVA.loc[[(c, 'maximum') for c in conds]]
x.set_index(conds, inplace=True)
x = x[x>1e-10]
return (x * 1000) / 60
def gene_to_flux_carrying_rxns(V,model,use_cache=False):
if use_cache:
with open('../cache/genes_to_flux_carrying_reactions.p', 'rb') as fp:
return pickle.load(fp)
out = {}
for c in V.columns:
out[c] = {}
vc = V[c]
vc = vc[vc>0]
for g in model.genes:
rxns = {r.id for r in list(g.reactions)} & set(vc.index)
if len(rxns)>0:
out[c][g.id] = rxns
with open('../cache/genes_to_flux_carrying_reactions.p', 'wb') as fp:
pickle.dump(out, fp)
return out
def convert_SA_to_kcat(SA, MW):
# MW in units of kDa
return SA.mul(MW) / 60
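# Dimensional sketch for the conversion above: SA in umol/(mg*min) times MW
# in kDa (i.e. mg/umol) gives 1/min; dividing by 60 yields kcat in 1/s.
# E.g. SA = 60 umol/mg/min with MW = 50 kDa -> kcat = 60*50/60 = 50 s^-1.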
def flux_carrying_reactions_to_enzymes(V,E,model,use_cache=False):
if use_cache:
with open('../cache/flux_carrying_reactions_to_enzymes.p', 'rb') as fp:
return pickle.load(fp)
try:
V = V.drop('flux_counter')
except ValueError:
print "flux couter already removed"
mapper = {}
for c in V.columns:
mapper[c] = {}
#use only flux carrying reactions in a given condition
vc = V[c]
vc = vc[vc>0]
reactions = map(str,model.reactions)
for rid in vc.index:
if rid not in reactions:
continue
r = model.reactions.get_by_id(rid)
genes = {g.id:g for g in r.genes}
            # annoying gene in the model - just ignore the reactions it carries
if 's0001' in genes: continue
mapper[c][r.id] = {}
for i, (gid, g) in enumerate(genes.iteritems()):
rxns = {r.id for r in list(g.reactions)} & set(vc.index)
mapper[c][rid][gid] = float(len(rxns))
with open('../cache/flux_carrying_reactions_to_enzymes.p', 'wb') as fp:
pickle.dump(mapper, fp)
return mapper
def specific_activity(V,E,model):
mapper = flux_carrying_reactions_to_enzymes(V,E,model)
V = V.to_dict()
E = E.to_dict()
SA = {}
for c,reactions in V.iteritems():
SA[c] = {}
for r,v in reactions.iteritems():
if r in mapper[c]:
genes = mapper[c][r]
abundance = E[c]
weight = sum([abundance[e] / genes[e] for e in genes])
if np.isfinite(weight) and weight > 0:
SA[c][r] = V[c][r] / weight
else:
SA[c][r] = np.nan
SA = pd.DataFrame.from_dict(SA)
return SA
def enzyme_capacity_usage(SA):
kmax = SA.max(axis=1)
return SA.div(kmax,axis=0)
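# Sketch of what enzyme_capacity_usage returns: each reaction's specific
# activity is normalized by its maximum over all conditions (kmax), so every
# row peaks at 1.0 in the condition where the enzyme is most used
# (output shown approximately):
# >>> sa = pd.DataFrame({'c1': [1., 4.], 'c2': [2., 8.]}, index=['r1', 'r2'])
# >>> enzyme_capacity_usage(sa)
#      c1   c2
# r1  0.5  1.0
# r2  0.5  1.0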
def metabolic_capacity(V,E,model):
tmp = gene_to_flux_carrying_rxns(V,model)
capacity = pd.Series({c:E.loc[tmp[c].keys()][c].sum() for c in V.columns})
return capacity
def metabolic_capacity_usage(V,E,model):
capacity = metabolic_capacity(V,E,model)
SA = specific_activity(V,E,model)
ECU = enzyme_capacity_usage(SA)
E = (V/SA).loc[SA.index]
return (ECU.mul(E)).sum() / capacity
def bootstrap_capacity_usage_error(V,E,model,iterations=10):
UC = pd.DataFrame(index=range(iterations),columns=V.columns)
for i in xrange(iterations):
newE = pd.DataFrame(index=E.index, columns=E.columns)
for c in V.columns:
x = E[c]
x = x[x>0]
rand = np.random.choice(x.values, len(x), replace=True)
newE[c][x.index] = rand
newE.replace(np.nan, 0, inplace=True)
        UC.loc[i] = metabolic_capacity_usage(V,newE,model)
return UC.std()
#def get_foldchange(V,E,gc):
#
# gr = gc['growth rate [h-1]']
#
# combs_all = [(i,j) for (i,j) in combinations(gc.index, 2) if gr[j] > gr[i]]
# delta_mu = pd.Series(data = map(lambda x: np.log2(gr[x[1]]/gr[x[0]]), combs_all),
# index = combs_all)
# delta_p = pd.DataFrame(index=reactions, columns=combs)
# delta_v = pd.DataFrame(index=reactions, columns=combs)
# for (i, j) in combs:
# delta_p[(i,j)] = np.log2(p[j] / p[i])
# delta_v[(i,j)] = np.log2(v[j] / v[i])
# return delta_p, delta_v, delta_mu
def get_surface_to_volume_ratio(length,width):
    # spherocylinder: cylinder of length (length - width) plus two hemispherical
    # caps; float literals keep 4/3 from truncating to 1 under Python 2
    volume = np.pi*(length-width)*(width/2.0)**2 + (4.0/3.0)*np.pi*(width/2.0)**3 # um^3
    surface = 2*np.pi*(length-width)*(width/2.0) + 4*np.pi*(width/2.0)**2 # um^2
return surface, volume, surface/volume
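# Geometry sketch for the spherocylinder above: a cell of length 4 um and
# width 2 um has a cylindrical body of length 2 um plus two hemispherical
# caps that together form one sphere of radius 1 um, so
# surface = 2*pi*2*1 + 4*pi*1**2 = 8*pi ~ 25.13 um^2
# volume = pi*2*1**2 + (4./3)*pi*1**3 = 10*pi/3 ~ 10.47 um^3
# and surface/volume = 2.4 per um.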
def optimize_growth(model, cs):
rxns = {r.id:r for r in model.reactions}
rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized
try:
rxns['EX_' + cs + '_e'].lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
except KeyError:
print "%s is not in the model, using glucose instead" %cs
rxns['EX_glc_e'].lower_bound = -1000
rxns['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 0
rxns['Ec_biomass_iJO1366_WT_53p95M'].objective_coefficient = 1
model.optimize()
return
def get_maximal_growth_rate(model, Vmax, condition):
Vmax = Vmax[condition].copy()
Vmax = Vmax.dropna()
Vmax = Vmax * 60 / 1000 # convert to units of mmol/gCDW/h
rxns = {r.id:r for r in model.reactions}
initial_bound = {}
for r in Vmax.index:
initial_bound[rxns[r]] = rxns[r].upper_bound
rxns[r].upper_bound = Vmax[r]
optimize_growth(model, gc['media_key'][condition])
for r,ub in initial_bound.iteritems():
r.upper_bound = ub
return model.solution.f
def get_rand_ECU(ECU,model):
reactions = [str(r) for r in model.reactions]
conds = ECU.columns
rand_ECU = pd.DataFrame(columns=conds, index=reactions)
for c in conds:
tmp = ECU[c].dropna()
rand_ECU[c] = np.random.gamma(tmp.mean(),tmp.std(),len(reactions))
return rand_ECU
def perform_pFBA(condition):
cs = gc['media_key'].loc[condition]
gr = gc['growth rate [h-1]'].loc[condition]
m = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
m.reactions.get_by_id('EX_glc_e').lower_bound = 0
convert_to_irreversible(m)
reac = dict([(r.id, r) for r in m.reactions])
try:
reac['EX_' + cs + '_e'].lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
except KeyError:
raise 'media key not in model'
reac['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 0
reac['Ec_biomass_iJO1366_WT_53p95M'].objective_coefficient = 1
reac['Ec_biomass_iJO1366_WT_53p95M'].upper_bound = gr
print "solving pFBA for %s" %condition
optimize_minimal_flux(m, already_irreversible=True)
return pd.Series(m.solution.x_dict)
#conditions = gc.dropna(subset=['media_key']).index
#gr = gc['growth rate [h-1]'][conditions]
#gr.sort()
#conditions = gr.index
#
#mmol_gCDW_h = pd.DataFrame(columns=conditions, index=rxns.keys())
#for c in conditions:
# mmol_gCDW_h[c] = perform_pFBA(c)
#mmol_gCDW_h.to_csv("../data/flux[mmol_gCDW_h].csv")
#
#if __name__ == "__main__":
# model_fname = "../data/iJO1366.xml"
# model = create_cobra_model_from_sbml_file(model_fname)
# convert_to_irreversible(model)
# reactions = map(lambda x: x.id, model.reactions)
# fluxes = perform_pFBA(model, 'glc', 0.5, 18.5)
'''
x = x.dropna()
w = w.dropna()
ix = x.index & w.index
x = x[ix].values
w = w[ix].values
Mw = np.zeros(1000)
for i in xrange(1000):
rand = np.random.choice(range(len(x)), len(x), replace=True)
newx = x[rand]
neww = w[rand]
Mw[i] = sum(newx*neww)/sum(neww)
return np.std(Mw)
'''
# print len(fva.keys())
# return fva
#map_proteomics(copies_cell_persist)
#map_proteomics(protein_info)
#map_proteomics(fg_cell_old)
#
#x = copies_cell_persist[new_conditions]
#y = copies_cell_persist['Protein molecular weight']
#fg_cell_persist = x.mul(y,axis=0) / (6.022*1e8)
#
#fg_cell = fg_cell_old.join(fg_cell_persist, how='outer')
#fg_fL = fg_cell.div(fL_cell)
#
#mg_gCDW = fg_fL[gr.index]/(1100/3)*1000 # cell density is 1100 g/L; DW fraction is 1/3
##mg_gCDW.to_csv('../data/mg_gCDW.csv')
##
#out = protein_info.join(mg_gCDW)
#out.to_csv('../data/protein_abundance[mg_gCDW].csv', sep='\t')
#plt.figure()
#ax = plt.axes()
#old = fg_cell_old.index
#new = copies_cell_persist.index
#venn2([old, new], set_labels=('Schmidt et al.', 'Persisters'),set_colors=('#4a6b8a','#801515'),ax=ax)
#plt.tight_layout()
#plt.savefig('../res/comparing coverage.svg')
|
dandanvidi/in-vivo-enzyme-kinetics
|
scripts/helper.py
|
Python
|
mit
| 13,207
|
[
"Avogadro"
] |
5de2d161a3028c71d81edd85d57833203a474d258fb8f1826cd0a6e366f2dd29
|
#!/usr/bin/env python
#
# parse_pdb_header.py
# parses header of PDB files into a python dictionary.
# emerged from the Columba database project www.columba-db.de.
#
# author: Kristian Rother
#
# license: same as BioPython, read LICENSE.TXT from current BioPython release.
#
# last modified: 9.2.2004
#
# Added some small changes: the whole PDB file is not read in anymore, but just
# until the first ATOM record (faster). I also split parse_pdb_header into
# parse_pdb_header and parse_pdb_header_list, because parse_pdb_header_list
# can be more easily reused in PDBParser.
#
# Thomas, 19/03/04
#
# Renamed some clearly private functions to _something (ie. parse_pdb_header_list
# is now _parse_pdb_header_list)
# Thomas 9/05/04
"""Parse the header of a PDB file."""
from __future__ import print_function
import re
from Bio import File
def _get_journal(inl):
# JRNL AUTH L.CHEN,M.DOI,F.S.MATHEWS,A.Y.CHISTOSERDOV, 2BBK 7
journal = ""
for l in inl:
if re.search("\AJRNL", l):
journal += l[19:72].lower()
journal = re.sub("\s\s+", " ", journal)
return journal
def _get_references(inl):
# REMARK 1 REFERENCE 1 1CSE 11
# REMARK 1 AUTH W.BODE,E.PAPAMOKOS,D.MUSIL 1CSE 12
references = []
actref = ""
for l in inl:
if re.search("\AREMARK 1", l):
if re.search("\AREMARK 1 REFERENCE", l):
if actref != "":
actref = re.sub("\s\s+", " ", actref)
if actref != " ":
references.append(actref)
actref = ""
else:
actref += l[19:72].lower()
if actref != "":
actref = re.sub("\s\s+", " ", actref)
if actref != " ":
references.append(actref)
return references
# bring dates to format: 1909-01-08
def _format_date(pdb_date):
"""Converts dates from DD-Mon-YY to YYYY-MM-DD format."""
date = ""
year = int(pdb_date[7:])
if year < 50:
century = 2000
else:
century = 1900
date = str(century + year) + "-"
all_months = ['xxx', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
month = str(all_months.index(pdb_date[3:6]))
if len(month) == 1:
month = '0' + month
date = date + month + '-' + pdb_date[:2]
return date
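# Doctest-style sketch for _format_date (two-digit years below 50 map to
# 20xx, the rest to 19xx; the month token must already be capitalized as
# 'Jan'..'Dec', which _nice_case below produces):
# >>> _format_date('08-Jan-09')
# '2009-01-08'
# >>> _format_date('14-Jul-97')
# '1997-07-14'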
def _chop_end_codes(line):
"""Chops lines ending with ' 1CSA 14' and the like."""
return re.sub("\s\s\s\s+[\w]{4}.\s+\d*\Z", "", line)
def _chop_end_misc(line):
"""Chops lines ending with ' 14-JUL-97 1CSA' and the like."""
return re.sub("\s\s\s\s+.*\Z", "", line)
def _nice_case(line):
"""Makes A Lowercase String With Capitals."""
l = line.lower()
s = ""
i = 0
nextCap = 1
while i < len(l):
c = l[i]
if c >= 'a' and c <= 'z' and nextCap:
c = c.upper()
nextCap = 0
elif c == ' ' or c == '.' or c == ',' or c == ';' or c == ':' or c == '\t' or\
c == '-' or c == '_':
nextCap = 1
s += c
i += 1
return s
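# Sketch of _nice_case behavior: the input is lowercased and then capitalized
# after whitespace and the listed separators.
# >>> _nice_case('14-JUL-97')
# '14-Jul-97'
# >>> _nice_case('x-ray diffraction')
# 'X-Ray Diffraction'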
def parse_pdb_header(infile):
"""
Returns the header lines of a pdb file as a dictionary.
Dictionary keys are: head, deposition_date, release_date, structure_method,
resolution, structure_reference, journal_reference, author and
compound.
"""
header = []
with File.as_handle(infile, 'r') as f:
for l in f:
record_type = l[0:6]
if (record_type == 'ATOM ' or record_type == 'HETATM' or
record_type == 'MODEL '):
break
else:
header.append(l)
return _parse_pdb_header_list(header)
def _parse_pdb_header_list(header):
# database fields
dict = {'name': "",
'head': '',
'deposition_date': "1909-01-08",
'release_date': "1909-01-08",
'structure_method': "unknown",
'resolution': 0.0,
'structure_reference': "unknown",
'journal_reference': "unknown",
'author': "",
'compound': {'1': {'misc': ''}}, 'source': {'1': {'misc': ''}}}
dict['structure_reference'] = _get_references(header)
dict['journal_reference'] = _get_journal(header)
comp_molid = "1"
src_molid = "1"
last_comp_key = "misc"
last_src_key = "misc"
for hh in header:
h = re.sub("[\s\n\r]*\Z", "", hh) # chop linebreaks off
# key=re.sub("\s.+\s*","",h)
key = h[:6].strip()
# tail=re.sub("\A\w+\s+\d*\s*","",h)
tail = h[10:].strip()
# print("%s:%s" % (key, tail)
# From here, all the keys from the header are being parsed
if key == "TITLE":
name = _chop_end_codes(tail).lower()
if 'name' in dict:
dict['name'] += " " + name
else:
dict['name'] = name
elif key == "HEADER":
rr = re.search("\d\d-\w\w\w-\d\d", tail)
if rr is not None:
dict['deposition_date'] = _format_date(_nice_case(rr.group()))
head = _chop_end_misc(tail).lower()
dict['head'] = head
elif key == "COMPND":
tt = re.sub("\;\s*\Z", "", _chop_end_codes(tail)).lower()
# look for E.C. numbers in COMPND lines
rec = re.search('\d+\.\d+\.\d+\.\d+', tt)
if rec:
dict['compound'][comp_molid]['ec_number'] = rec.group()
tt = re.sub("\((e\.c\.)*\d+\.\d+\.\d+\.\d+\)", "", tt)
tok = tt.split(":")
if len(tok) >= 2:
ckey = tok[0]
cval = re.sub("\A\s*", "", tok[1])
if ckey == 'mol_id':
dict['compound'][cval] = {'misc': ''}
comp_molid = cval
last_comp_key = "misc"
else:
dict['compound'][comp_molid][ckey] = cval
last_comp_key = ckey
else:
dict['compound'][comp_molid][last_comp_key] += tok[0] + " "
elif key == "SOURCE":
tt = re.sub("\;\s*\Z", "", _chop_end_codes(tail)).lower()
tok = tt.split(":")
# print(tok)
if len(tok) >= 2:
ckey = tok[0]
cval = re.sub("\A\s*", "", tok[1])
if ckey == 'mol_id':
dict['source'][cval] = {'misc': ''}
comp_molid = cval
last_src_key = "misc"
else:
dict['source'][comp_molid][ckey] = cval
last_src_key = ckey
else:
dict['source'][comp_molid][last_src_key] += tok[0] + " "
elif key == "KEYWDS":
kwd = _chop_end_codes(tail).lower()
if 'keywords' in dict:
dict['keywords'] += " " + kwd
else:
dict['keywords'] = kwd
elif key == "EXPDTA":
expd = _chop_end_codes(tail)
# chop junk at end of lines for some structures
expd = re.sub('\s\s\s\s\s\s\s.*\Z', '', expd)
# if re.search('\Anmr',expd,re.IGNORECASE): expd='nmr'
# if re.search('x-ray diffraction',expd,re.IGNORECASE): expd='x-ray diffraction'
dict['structure_method'] = expd.lower()
elif key == "CAVEAT":
# make Annotation entries out of these!!!
pass
elif key == "REVDAT":
rr = re.search("\d\d-\w\w\w-\d\d", tail)
if rr is not None:
dict['release_date'] = _format_date(_nice_case(rr.group()))
elif key == "JRNL":
# print("%s:%s" % (key, tail))
if 'journal' in dict:
dict['journal'] += tail
else:
dict['journal'] = tail
elif key == "AUTHOR":
auth = _nice_case(_chop_end_codes(tail))
if 'author' in dict:
dict['author'] += auth
else:
dict['author'] = auth
elif key == "REMARK":
if re.search("REMARK 2 RESOLUTION.", hh):
r = _chop_end_codes(re.sub("REMARK 2 RESOLUTION.", '', hh))
r = re.sub("\s+ANGSTROM.*", "", r)
try:
dict['resolution'] = float(r)
except:
# print('nonstandard resolution %r' % r)
dict['resolution'] = None
else:
# print(key)
pass
if dict['structure_method'] == 'unknown':
if dict['resolution'] > 0.0:
dict['structure_method'] = 'x-ray diffraction'
return dict
if __name__ == '__main__':
# Reads a PDB file passed as argument, parses its header, extracts
# some data and returns it as a dictionary.
import sys
filename = sys.argv[1]
with open(filename, 'r') as handle:
data_dict = parse_pdb_header(handle)
# print the dictionary
for k, y in data_dict.items():
print("-" * 40)
print(k)
print(y)
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PDB/parse_pdb_header.py
|
Python
|
apache-2.0
| 9,173
|
[
"Biopython"
] |
412c51c82f83c2613bdff640665a321454cce8410366535fe13caa8ba4c64785
|
import matplotlib as mpl
mpl.use('Agg')
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import pdb
import pickle
import sys
np.set_printoptions(threshold=np.inf)  # print full arrays; the string 'nan' is rejected by modern numpy
num_bins = 100
with open("torsion.p", "rb") as fh:
    torsion_r48_a = pickle.load(fh)
plt.figure(1)
(n1, bins1, patch1) = plt.hist(torsion_r48_a, num_bins, label='AlkEthOH_r48 histogram', color='green', density=True)  # 'normed' was removed from matplotlib; density=True is the replacement
n1 = np.array(n1)
#for i,j in enumerate(n1):
# if j == 0:
# n1[i] = n1[i+1]
#print n1
ntotal = len(torsion_r48_a)
# Figure out the width of the kernel density. The "rule-of-thumb"
# (Silverman) estimator uses the sample stdev, but that is for a single
# Gaussian; we should instead use the stdev of the individual
# Gaussian-like features. Eyeballing the histogram, something like
# 6 degrees as 2 sigma, so sigma is about 3 degrees = 3/360 * 2*pi = 0.0524 rad.
# This gives a relatively smooth PMF without smoothing too much; the right
# value will of course depend on the temperature the simulation is run at.
sd = 1.06*0.0524*ntotal**(-0.2)
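# Hedged alternative (editorial sketch, not the author's choice): instead of
# the hand-tuned 0.0524 rad, the feature width could be estimated from the
# data via the circular standard deviation before applying the same
# 1.06*sigma*n**(-1/5) rule of thumb.
_ang = np.asarray(torsion_r48_a)
_R = np.hypot(np.mean(np.sin(_ang)), np.mean(np.cos(_ang)))  # mean resultant length
_circ_sd = np.sqrt(-2.0*np.log(_R))  # circular stdev in radians
sd_alt = 1.06*_circ_sd*ntotal**(-0.2)  # unused below; shown for comparison only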
# create a fine grid
ngrid = 10000
kT = 0.6 # Units of kcal/mol (simulation temp was 300 K)
x = np.arange(-np.pi,np.pi,(2*np.pi)/ngrid)
y = np.zeros(ngrid)
# Easier to use a von Mises kernel than a wrapped Gaussian: it is periodic
# by construction, with density exp(kappa*cos(x-a))/(2*pi*I0(kappa)).
denom = 2*np.pi*scipy.special.iv(0,1/sd)  # von Mises normalization, kappa = 1/sd
for a in torsion_r48_a:
y += np.exp(np.cos(x-a)/sd)/denom
y /= ntotal
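# Equivalent vectorized form of the loop above (editorial sketch): build the
# (ngrid, nsamples) angle-difference matrix once and sum over samples.
# Memory is O(ngrid*nsamples), so the loop may be preferable for large data.
_diffs = np.subtract.outer(x, np.asarray(torsion_r48_a))
y_vec = np.exp(np.cos(_diffs)/sd).sum(axis=1)/(denom*ntotal)
# y_vec should match y to floating-point accuracy.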
plt.plot(x,y,label = 'kernel density estimate (KDE)')
plt.title('Comparison between histogram and KDE')
plt.xlabel('x (radians)')
plt.ylabel('P(x)')
plt.legend()
plt.savefig('KDE.png')
pmf = -kT*np.log(y)
pmf1 = -kT*np.log(n1) # now we have the PMF from the histogram
pmf1[pmf1==np.inf] = 0  # empty bins give -kT*log(0) = inf; zero them out
bins1 = np.array(bins1)
#(n1,bins1,patch1) = plt.hist(torsion_r48_a, num_bins, label='AlkEthOH_r48 histogram', color='green', normed=1)
plt.figure()
plt.hist(bins1[1:],len(bins1[1:]),weights=pmf1,label='non-smooth pmf')
plt.plot(x,pmf,label='smooth pmf')
plt.xlabel('x (radians)')
plt.ylabel('Potential of Mean Force (kT)')
plt.legend()
plt.title('Comparison of smoothed and unsmoothed pmf')
plt.savefig('PMF_smooth_vs_nonsmooth.png')
# Directly calculate Fourier coefficients
# http://mathworld.wolfram.com/FourierSeries.html
# a0 = (1/pi)*integral(f(x) dx)         over -pi to pi
# an = (1/pi)*integral(f(x) cos(nx) dx) over -pi to pi
# bn = (1/pi)*integral(f(x) sin(nx) dx) over -pi to pi
a0 = (1/np.pi)*sum(np.diff(bins1)*pmf1)
a_vals = [(1/np.pi)*sum(np.diff(bins1)*pmf1*np.cos(n*bins1[1:])) for n in np.arange(1,7)]
b_vals = [(1/np.pi)*sum(np.diff(bins1)*pmf1*np.sin(n*bins1[1:])) for n in np.arange(1,7)]
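# Editorial check (not in the original script): the real coefficients above
# give a 6-harmonic approximation a0/2 + sum_n (a_n cos(nx) + b_n sin(nx)),
# evaluated here on the same bin edges used in the coefficient sums.
_centers = bins1[1:]
pmf1_recon = 0.5*a0*np.ones(len(_centers))
for _n in np.arange(1, 7):
    pmf1_recon += a_vals[_n-1]*np.cos(_n*_centers) + b_vals[_n-1]*np.sin(_n*_centers)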
# adapted from http://stackoverflow.com/questions/4258106/how-to-calculate-a-fourier-series-in-numpy
# complex fourier coefficients
def cn(n,y):
c = y*np.exp(-1j*n*x)
return c.sum()/c.size
def ft(x, cn, Nh):
    # real-signal reconstruction: f(x) ~ c0 + 2*Re(sum_{n=1..Nh} c_n e^{inx});
    # the factor 2 accounts for the omitted negative-n terms (c_{-n} = conj(c_n))
    f = np.array([2*cn[i]*np.exp(1j*i*x) for i in range(1,Nh+1)])
    return f.sum()+cn[0]
# generate Fourier series (complex)
Ns = 24 # needs to be adjusted
cf = np.zeros(Ns+1,dtype=complex)
for i in range(Ns+1):
cf[i] = cn(i,pmf)
cfdist = np.zeros(Ns+1,dtype=complex)
for i in range(Ns+1):
cfdist[i] = cn(i,y)
print(cfdist)
y1 = np.array([ft(xi,cf,Ns).real for xi in x]) # plot the fourier series approximation.
plt.figure(2)
plt.plot(x,pmf, label='pmf')
plt.plot(x,y1, label='Fourier transform')
plt.title('comparison between PMF and Fourier Transform')
plt.legend()
plt.xlabel('x (radians)')
plt.ylabel('Potential of Mean Force (kT)')
plt.savefig('PMFfitFourier.png')
y1dens = np.array([ft(xi,cfdist,Ns).real for xi in x]) # plot the fourier series approximation.
print(y1dens)
plt.figure(7)
plt.plot(x,y, label='Density')
plt.plot(x,y1dens, label='Fourier transform')
plt.title('comparison between Density and Fourier Transform')
plt.legend()
plt.xlabel('x (radians)')
plt.ylabel('Density function (relative likelihood)')
plt.savefig('DensityfitFourier.png')
# sys.exit()  # uncomment to stop after the Fourier-series comparison; commented out so the least-squares section below runs
# OK, Fourier series works pretty well. But we actually want to do a
# linear least square fit to a fourier series, since we want to get
# the coefficients out. Let's use the standard LLS formulation with
# normal equations.
# http://www.math.uconn.edu/~leykekhman/courses/MATH3795/Lectures/Lecture_9_Linear_least_squares_SVD.pdf
# basis functions are 1, sin(x), cos(x), sin(2x), cos(2x), . . .
Z = np.ones([len(x),2*Ns+1])
for i in range(1,Ns+1):
Z[:,2*i-1] = np.sin(i*x)
Z[:,2*i] = np.cos(i*x)
ZM = np.matrix(Z) # easier to manipulate as a matrix
[U,S,V] = np.linalg.svd(ZM) # perform SVD; note numpy returns V transposed (V^H), so V.transpose()
# below is the true V. The singular values come out as sqrt(N) for the constant column and
# sqrt(N/2) for each sin/cos column (hence the repeating 1 : 1/sqrt(2) pattern), because the
# Fourier basis columns are orthogonal. Still need V and U, though.
Sinv = np.matrix(np.zeros(np.shape(Z))).transpose() # get the inverse of the singular matrix.
for i in range(2*Ns+1):
Sinv[i,i] = 1/S[i]
cm = V.transpose()*Sinv*U.transpose()*np.matrix(pmf).transpose() # get the linear constants
cmdens = V.transpose()*Sinv*U.transpose()*np.matrix(y).transpose() # get the linear constants
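# Editorial cross-check (not in the original script): numpy's lstsq solves the
# same problem via an SVD internally and should reproduce the hand-rolled fit.
cm_check, _res, _rank, _sv = np.linalg.lstsq(Z, pmf, rcond=None)
assert np.allclose(np.asarray(cm).ravel(), cm_check)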
cl = np.array(cm) # cast back to array for plotting
cldens = np.array(cmdens)
# check that it works by plotting
y2 = cl[0]*np.ones(len(x))
ans = []
bns = []
for i in range(1,Ns+1):
y2 += cl[2*i-1]*np.sin(i*x)
y2 += cl[2*i]*np.cos(i*x)
ans.append(cl[2*i])
bns.append(cl[2*i-1])
y2_dens = cldens[0]*np.ones(len(x))
ans_dens = []
bns_dens = []
for i in range(1,Ns+1):
y2_dens += cldens[2*i-1]*np.sin(i*x)
y2_dens += cldens[2*i]*np.cos(i*x)
ans_dens.append(cldens[2*i])
bns_dens.append(cldens[2*i-1])
# How different are the coefficients from the two methods?
print("Difference between Fourier series and linear fit to finite Fourier")
print("index   Four       Fit        Diff")
for i in range(2*Ns+1):
    if i==0:
        cfp = cf[i].real
    elif i%2==0:
        cfp = 2*cf[i//2].real
    elif i%2==1:
        cfp = -2*cf[(i+1)//2].imag
    print("{:3d} {:10.5f} {:10.5f} {:10.5f}".format(i, cfp, float(cl[i]), cfp-float(cl[i])))
print("Looks like they are the same!")
plt.figure(3)
plt.plot(x,pmf,label='pmf')
plt.plot(x,y2,label='LLS fit')
plt.title('Comparison between PMF and linear least squares fit')
plt.xlabel('x (radians)')
plt.ylabel('Potential of Mean Force (kT)')
plt.legend()
plt.savefig('PMFfitLLS.png')
plt.figure(5)
plt.plot(x,y,label='Density KDE')
plt.plot(x,y2_dens,label='LLS fit')
plt.title('Comparison between KDE Density and linear least squares fit')
plt.xlabel('x (radians)')
plt.ylabel('Density (relative likelihood)')
plt.legend()
plt.savefig('DensityfitLLS.png')
#Compare the LLS and the fourier transform directly.
plt.figure(4)
plt.plot(x,y1,label='Fourier')
plt.plot(x,y2,label='LLS fit')
plt.title('Comparison between Fourier and finite linear least squares fit')
plt.xlabel('x (radians)')
plt.ylabel('Potential of Mean Force (kT)')
plt.legend()
plt.savefig('Fourier_vs_LLS.png')
#Compare the LLS and the fourier transform directly.
plt.figure(6)
plt.plot(x,y1dens,label='Fourier')
plt.plot(x,y2_dens,label='LLS fit')
plt.title('Comparison between Fourier and finite linear least squares fit')
plt.xlabel('x (radians)')
plt.ylabel('Density (relative likelihood)')
plt.legend()
plt.savefig('Fourier_vs_LLS_density.png')
# determine the covariance matrix for the fitting parameters
dev = pmf - np.array(ZM*cm).transpose()
residuals = np.sum(dev**2)
s2 = residuals/(len(pmf) - (2*Ns + 1))  # residual variance; dof = N minus the 2*Ns+1 fitted parameters
cov = s2*(V.transpose()*np.linalg.inv(np.diag(S**2))*V)
print "Covariance matrix is:"
print cov
print "seem to be no nonzero off-diagonal elements! Uncorrelated! Probably because of orthogonality of Fourier series."
'''
conclusion: there is no correlation, and a linear fit to a discrete
fourier series ends up being the same thing as the first N fourier coefficients
'''
|
bmanubay/open-forcefield-tools
|
single-molecule-property-generation/torsion_fitting/fitting_example.py
|
Python
|
mit
| 7,644
|
[
"Gaussian"
] |
cc7af362d49efcdf37085442c8a88c56b59103b074853fe08ca65e4684ad8bc3
|