| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64–64 |
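To make the schema above concrete, here is a sketch of a single record as a Python dict. The values are copied from the first row below (the full `text` field is elided); the dict itself is only an illustration, not part of the dataset dump.

# Illustrative record layout for the eight columns listed in the schema.
record = {
    "text": "...",  # full Python source file (elided)
    "repo_name": "zinka/arraytool_gui",
    "path": "misc/test.py",
    "language": "Python",
    "license": "bsd-3-clause",
    "size": 3931,
    "keyword": ["Mayavi", "VTK"],
    "text_hash": "dd6dedb04084073ddef2dd00b4335b94f94181609946165f7e6733c6c0f9e844",
}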
"""
This example demonstrates using Mayavi as a component of a large Qt
application.
For this use, Mayavi is embedded in a QWidget. To understand this
example, please read section :ref:`building-applications`.
"""
# First, and before importing any Enthought packages, set the ETS_TOOLKIT
# environment variable to qt4, to tell Traits that we will use Qt.
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
# By default, the PySide binding will be used. If you want the PyQt bindings
# to be used, you need to set the QT_API environment variable to 'pyqt'
#os.environ['QT_API'] = 'pyqt'
# To be able to use PySide or PyQt4 and not run in conflicts with traits,
# we need to import QtGui and QtCore from pyface.qt
from pyface.qt import QtGui, QtCore
# Alternatively, you can bypass this line, but you need to make sure that
# the following lines are executed before the import of PyQT:
# import sip
# sip.setapi('QString', 2)
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
################################################################################
#The actual visualization
class Visualization(HasTraits):
scene = Instance(MlabSceneModel, ())
@on_trait_change('scene.activated')
def update_plot(self):
# This function is called when the view is opened. We don't
# populate the scene when the view is not yet open, as some
# VTK features require a GLContext.
# We can do normal mlab calls on the embedded scene.
self.scene.mlab.test_points3d()
# the layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
resizable=True # We need this to resize with the parent widget
)
################################################################################
# The QWidget containing the visualization; this is pure PyQt4 code.
class MayaviQWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
layout.setContentsMargins(0,0,0,0)
layout.setSpacing(0)
self.visualization = Visualization()
# If you want to debug, beware that you need to remove the Qt
# input hook.
#QtCore.pyqtRemoveInputHook()
#import pdb ; pdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# The edit_traits call will generate the widget to embed.
self.ui = self.visualization.edit_traits(parent=self,
kind='subpanel').control
layout.addWidget(self.ui)
self.ui.setParent(self)
if __name__ == "__main__":
# Don't create a new QApplication, it would unhook the Events
# set by Traits on the existing QApplication. Simply use the
# '.instance()' method to retrieve the existing one.
app = QtGui.QApplication.instance()
container = QtGui.QWidget()
container.setWindowTitle("Embedding Mayavi in a PyQt4 Application")
# define a "complex" layout to test the behaviour
layout = QtGui.QGridLayout(container)
# put some stuff around mayavi
label_list = []
for i in range(3):
for j in range(3):
if (i==1) and (j==1):continue
label = QtGui.QLabel(container)
label.setText("Your QWidget at (%d, %d)" % (i,j))
label.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)
layout.addWidget(label, i, j)
label_list.append(label)
mayavi_widget = MayaviQWidget(container)
layout.addWidget(mayavi_widget, 1, 1)
container.show()
window = QtGui.QMainWindow()
window.setCentralWidget(container)
window.show()
# Start the main event loop.
app.exec_()
| zinka/arraytool_gui | misc/test.py | Python | bsd-3-clause | 3,931 | ["Mayavi", "VTK"] | dd6dedb04084073ddef2dd00b4335b94f94181609946165f7e6733c6c0f9e844 |
import numpy as np
import numpy.linalg as linalg
import matplotlib.pyplot as plt
import sys, os
import glob2
import os, sys
from tabulate import tabulate
# Information specific to this computation.
hr_filename = 'NiO_hr.dat'
out_suffix = '_hamiltonian.txt'
Efermi = 14.0853
# This reads the hamiltonian elements for a specific unit cell coordinate.
# Wannier90 reports them by unit cell (x,y,z, i,j) where (1,0,0, 1,1) is the overlap of
# wavefunction 1 in the 0,0,0 cell with wavefunction 1 translated by one unit cell in the x direction.
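# For orientation, an illustrative layout of the data rows parsed below
# (values are made up; the column order matches the standard seedname_hr.dat):
#   x   y   z   i   j    Re(H_ij)    Im(H_ij)
#   0   0   0   1   1   -0.123456    0.000000
#   1   0   0   2   1    0.004321    0.000000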
def GetOneCell(x,y,z):
hrtrim = hr[(hr[:,0]==x) & (hr[:,1]==y) & (hr[:,2]==z)]
hrtrim = hrtrim[:,3:]
H = np.zeros((num_MLWFs,num_MLWFs), dtype='complex')
for r in hrtrim[:]:
i = int(r[0])-1
j = int(r[1])-1
overlap = complex(r[2], r[3])
H[i, j] = overlap
return H
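# PrintH formats H for inspection: for each row, one line of real parts
# followed by one line of imaginary parts (suffixed with 'j'), with a blank
# line between rows.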
def PrintH(H):
OutStr = ''
for m in range(H.shape[0]):
# for n in range(H.shape[1]):
# OutStr += f'({float(np.real(H[m,n])):+8.3f}{float(np.imag(H[m,n])):+8.3f}j) '
for n in range(H.shape[1]):
OutStr += f'{float(np.real(H[m,n])):+8.3f} '
OutStr += '\n'
for n in range(H.shape[1]):
OutStr += f'{float(np.imag(H[m,n])):+8.3f}j '
OutStr += '\n\n'
return OutStr
# Loop through all the subdirectories and process all the hr_filename files.
G = glob2.glob('*/')
for g in G:
os.chdir(g)
print(f'Reading Hamiltonian in {g}')
with open(hr_filename, 'r') as f:
_ = f.readline() # The first line is just the date and time.
num_MLWFs = int(f.readline()) # How many wannier functions in the hamiltonian basis.
num_rpts = int(f.readline()) # How many Wigner-Seitz grid-points (unit cells) are listed.
# We will need to skip the first three lines plus the R points lines with 15 entries per line.
hamiltonian_header_lines = int(np.ceil(num_rpts/15)) + 3
# First read in the Hamiltonian file data.
hr = np.genfromtxt(hr_filename, skip_header=hamiltonian_header_lines)
OutStr = ''
# Print the eigenvalues obtained from just a single unit cell.
H = GetOneCell(0,0,0)
OutStr += f'Single unit cell eigenvalues: \n{linalg.eig(H)[0]}'
# Now do the same but considering the 6 nearest neighbors.
H = GetOneCell(0,0,0)
H += GetOneCell(1,0,0)
H += GetOneCell(0,1,0)
H += GetOneCell(0,0,1)
H += GetOneCell(-1,0,0)
H += GetOneCell(0,-1,0)
H += GetOneCell(0,0,-1)
OutStr += f'\n\nCenter unit cell + 6 nearest neighbors eigenvalues: \n{linalg.eig(H)[0]}'
# Now build it using every element.
H = np.zeros((num_MLWFs,num_MLWFs), dtype='complex')
for r in hr[:]:
x = int(r[0])
y = int(r[1])
z = int(r[2])
i = int(r[3])-1
j = int(r[4])-1
overlap = complex(r[5], r[6])
H[i, j] += overlap
# Print out the energy levels from greatest to least,
# along with <psi_m|psi_n> so we can see what orbitals
# they represent.
E, psi = linalg.eig(H)
OutStr += '\n\nEigenvalues and eigenvectors across all cells:\n'
for i in range(len(E)):
OutStr += f'Energy = {float(np.real(E[i])):0.2f} eV\n'
# OutStr += f'Energy = {np.real(E[i]):0.2f} eV'
OutStr += f'psi=('
normval=np.abs(psi[i]).sum()
for j in range(len(E)):
OutStr += f'{np.real(psi[i][j])/normval:0.2f}, '
OutStr += ')\n'
_ = plt.hist(np.real(E)-Efermi, orientation='horizontal', bins=10)
plt.xlabel('Num States')
plt.ylabel('E - E$_{Fermi}$')
plt.title('Eigenvalues')
plt.savefig(os.path.split(g)[0] + '_Eigenvalues.png')
OutStr += '\n\nUndiagonalized Hamiltonian Matrix:\n'
OutStr += PrintH(H)
print(OutStr)
with open(os.path.split(g)[0] + out_suffix, 'w') as outfile:
outfile.write(OutStr)
os.chdir('..')
| ZGainsforth/QEScripts | Wannier/ComputeWannierHamiltonians.py | Python | mpl-2.0 | 3,938 | ["Wannier90"] | 667c34c20cdc553bb5de8478781b19a49a3445a3491677a986d2f8123e532720 |
"""
Created on 6/05/2013
@author: thom
"""
import logging
import string
from rdkit.Chem import AllChem as Chem
from evaluator import Evaluator
from molecular_population import MolecularPopulation
from molecule import Molecule
class EvaluatorSummary(Evaluator):
def get_result_titles(self):
return ["Number of unique reactants", "Number of reactant types", "Maximum times a molecule was a reactant", "Length of longest molecule"]
def evaluate(self, results_filename, **kwargs):
"""Calculates some interesting statistics for the experimental run.
:rtype: Number of unique reactants, number of reactant types, maximum times a molecule was a reactant, length of longest molecule"""
results = Evaluator.load_results(results_filename)
population = MolecularPopulation(population=results['initial_population'], reactions=results['reactions'], size=100)
initial_population = population.get_slice_by_time([0])
initial_average_ke = results['initial_kinetic_energy'] / (initial_population.get_population_size() * 1.0)
final_average_ke = results['final_kinetic_energy'] / (population.get_population_size() * 1.0)
changing = len(population.get_changing_items())
changing_percent = (changing * 1.0 / len(population.get_items())) * 100.0
supplied_items = set(item for item in initial_population.get_items() if initial_population.get_quantity(item) > 0)
final_items = set(item for item in population.get_items() if population.get_quantity(item) > 0)
supplied_atom_count = sum([Molecule(i).GetNumAtoms() * initial_population.get_quantity(i) for i in supplied_items])
final_atom_count = sum([Molecule(i).GetNumAtoms() * population.get_quantity(i) for i in final_items])
iteration = collisions = 0
reactant_ids = {}
reactant_smiles = {}
keys = set()
for reaction in results['reactions']:
keys = keys.union(set(reaction.keys()))
# if ReactionNetwork._is_reaction(reaction):
for reactant in reaction['reactants']:
try:
reactant_ids[reactant['id']].append(iteration)
except KeyError:
reactant_ids[reactant['id']] = [iteration]
try:
reactant_smiles[reactant['smiles']].append(iteration)
except KeyError:
reactant_smiles[reactant['smiles']] = [iteration]
iteration += 1
assert iteration + collisions == len(results['reactions'])
logging.info("We began with {} types of items; there were {} active types at the end out of {} (both supplied and discovered) overall".format(
len(supplied_items), len(final_items), len(population.get_items())))
logging.info("The initial population size was {}; at the end, {}".format(initial_population.get_population_size(), population.get_population_size()))
logging.info("The initial average ke was {}; at the end, {}".format(initial_average_ke, final_average_ke))
logging.info("Approximately {:.2f}% of the item types ({}) changed quantity during the simulation, while {:.2f}% didn't change at all".format(
changing_percent, changing, 100 - changing_percent))
logging.info("The simulation ran for t={} and {} iterations ({} reactions and {} simple collisions)".format(results['t'], len(results['reactions']), iteration, collisions))
logging.info("Supplied atoms = {}, final atoms = {}".format(supplied_atom_count, final_atom_count))
if supplied_atom_count != final_atom_count:
logging.info("Food items detected")
logging.info("There were {} unique reactants and {} reactant types".format(len(reactant_ids), len(reactant_smiles)))
sorted_items = sorted(final_items, key=lambda t: len(t), reverse=True)
logging.info("The longest molecule type was {}".format(sorted_items[0]))
reactant_smiles_occurences = [len(occurences) for id, occurences in reactant_smiles.iteritems()]
logging.info("{} molecule types were a reactant in more than one reaction".format(len([x for x in reactant_smiles_occurences if x > 1])))
logging.info("The minimum and maximum number of times a molecule type served as a reactant was {} and {}".format(min(reactant_smiles_occurences), max(reactant_smiles_occurences)))
reactant_id_occurences = [len(occurences) for id, occurences in reactant_ids.iteritems()]
logging.info("{} molecules were a reactant in more than one reaction (should be zero)".format(len([x for x in reactant_id_occurences if x > 1])))
final_population_sorted = sorted([(item, population.get_quantity(item)) for item in final_items], key=lambda t: t[1], reverse=True)
final_population_string = ["{} x {}".format(item, quantity) for item, quantity in final_population_sorted]
logging.info("Final population = {}".format(string.join(final_population_string, ", ")))
return len(reactant_ids), len(reactant_smiles), max(reactant_smiles_occurences), len(sorted_items[0])
| th0mmeke/toyworld | evaluators/evaluator_summary.py | Python | gpl-3.0 | 5,124 | ["RDKit"] | 842b917ad93a379dd4e8cdda6e621433b4baf3988f59d08b9df0662b056eeaa0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('tracking', '0012_create_link_to_clinic'),
]
operations = [
migrations.CreateModel(
name='ReferringReportSetting',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('creation_time', models.DateTimeField(verbose_name='Creation Timestamp', blank=True, null=True)),
('modification_time', models.DateTimeField(verbose_name='Modification Timestamp', blank=True, null=True)),
('report_name', models.CharField(choices=[('visit_history', 'visit_history'), ('thankyou', 'thankyou')], verbose_name='Report Name', max_length=64)),
('enabled', models.BooleanField(verbose_name='Special type', default=True)),
('period', models.CharField(choices=[('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], verbose_name='Report Period', default='daily', max_length=16)),
('referring_entity', models.ForeignKey(to='tracking.ReferringEntity')),
],
),
migrations.AlterUniqueTogether(
name='referringreportsetting',
unique_together=set([('referring_entity', 'report_name')]),
),
migrations.AlterField(
model_name='patientvisit',
name='visit_count',
field=models.PositiveIntegerField(default=1, verbose_name='Visit Count', validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.DeleteModel(
name='ThankyouMails',
),
migrations.DeleteModel(
name='EmailReport',
),
]
| Heteroskedastic/Dr-referral-tracker | tracking/migrations/0013_auto_20160412_1228.py | Python | mit | 1,914 | ["VisIt"] | 1470ad4feb0940805ccea8de335daaeada6397889b8fb550d89765b380ef31ce |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
Analytical electron-phonon matrix for unrestricted Kohn-Sham
'''
import time
import numpy as np
from pyscf import lib
from pyscf.hessian import uks as uks_hess
from pyscf.hessian import rks as rks_hess
from pyscf.hessian import rhf as rhf_hess
from pyscf.grad import rks as rks_grad
from pyscf.dft import numint
from pyscf.eph import rhf as rhf_eph
from pyscf.eph.uhf import uhf_deriv_generator
from pyscf.data.nist import MP_ME
CUTOFF_FREQUENCY = rhf_eph.CUTOFF_FREQUENCY
KEEP_IMAG_FREQUENCY = rhf_eph.KEEP_IMAG_FREQUENCY
def _get_vxc_deriv1(hessobj, mo_coeff, mo_occ, max_memory):
mol = hessobj.mol
mf = hessobj.base
if hessobj.grids is not None:
grids = hessobj.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
nao, nmo = mo_coeff[0].shape
ni = mf._numint
xctype = ni._xc_type(mf.xc)
aoslices = mol.aoslice_by_atom()
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
dm0a, dm0b = mf.make_rdm1(mo_coeff, mo_occ)
vmata = np.zeros((mol.natm,3,nao,nao))
vmatb = np.zeros((mol.natm,3,nao,nao))
max_memory = max(2000, max_memory-(vmata.size+vmatb.size)*8/1e6)
if xctype == 'LDA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rhoa = ni.eval_rho2(mol, ao[0], mo_coeff[0], mo_occ[0], mask, xctype)
rhob = ni.eval_rho2(mol, ao[0], mo_coeff[1], mo_occ[1], mask, xctype)
vxc, fxc = ni.eval_xc(mf.xc, (rhoa,rhob), 1, deriv=2)[1:3]
u_u, u_d, d_d = fxc[0].T
ao_dm0a = numint._dot_ao_dm(mol, ao[0], dm0a, mask, shls_slice, ao_loc)
ao_dm0b = numint._dot_ao_dm(mol, ao[0], dm0b, mask, shls_slice, ao_loc)
for ia in range(mol.natm):
p0, p1 = aoslices[ia][2:]
# First-order density = rho1 * 2; the factor of 2 is omitted here because the complex conjugate is added at the end
rho1a = np.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0a[:,p0:p1])
rho1b = np.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0b[:,p0:p1])
wv = u_u * rho1a + u_d * rho1b
wv *= weight
aow = np.einsum('pi,xp->xpi', ao[0], wv)
rks_grad._d1_dot_(vmata[ia], mol, aow, ao[0], mask, ao_loc, True)
wv = u_d * rho1a + d_d * rho1b
wv *= weight
aow = np.einsum('pi,xp->xpi', ao[0], wv)
rks_grad._d1_dot_(vmatb[ia], mol, aow, ao[0], mask, ao_loc, True)
ao_dm0a = ao_dm0b = aow = None
for ia in range(mol.natm):
vmata[ia] = -vmata[ia] - vmata[ia].transpose(0,2,1)
vmatb[ia] = -vmatb[ia] - vmatb[ia].transpose(0,2,1)
elif xctype == 'GGA':
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rhoa = ni.eval_rho2(mol, ao[:4], mo_coeff[0], mo_occ[0], mask, xctype)
rhob = ni.eval_rho2(mol, ao[:4], mo_coeff[1], mo_occ[1], mask, xctype)
vxc, fxc = ni.eval_xc(mf.xc, (rhoa,rhob), 1, deriv=2)[1:3]
wva, wvb = numint._uks_gga_wv0((rhoa,rhob), vxc, weight)
ao_dm0a = [numint._dot_ao_dm(mol, ao[i], dm0a, mask, shls_slice, ao_loc)
for i in range(4)]
ao_dm0b = [numint._dot_ao_dm(mol, ao[i], dm0b, mask, shls_slice, ao_loc)
for i in range(4)]
for ia in range(mol.natm):
wva = dR_rho1a = rks_hess._make_dR_rho1(ao, ao_dm0a, ia, aoslices)
wvb = dR_rho1b = rks_hess._make_dR_rho1(ao, ao_dm0b, ia, aoslices)
wva[0], wvb[0] = numint._uks_gga_wv1((rhoa,rhob), (dR_rho1a[0],dR_rho1b[0]), vxc, fxc, weight)
wva[1], wvb[1] = numint._uks_gga_wv1((rhoa,rhob), (dR_rho1a[1],dR_rho1b[1]), vxc, fxc, weight)
wva[2], wvb[2] = numint._uks_gga_wv1((rhoa,rhob), (dR_rho1a[2],dR_rho1b[2]), vxc, fxc, weight)
aow = np.einsum('npi,Xnp->Xpi', ao[:4], wva)
rks_grad._d1_dot_(vmata[ia], mol, aow, ao[0], mask, ao_loc, True)
aow = np.einsum('npi,Xnp->Xpi', ao[:4], wvb)
rks_grad._d1_dot_(vmatb[ia], mol, aow, ao[0], mask, ao_loc, True)
ao_dm0a = ao_dm0b = aow = None
for ia in range(mol.natm):
vmata[ia] = -vmata[ia] - vmata[ia].transpose(0,2,1)
vmatb[ia] = -vmatb[ia] - vmatb[ia].transpose(0,2,1)
elif xctype == 'MGGA':
raise NotImplementedError('meta-GGA')
return vmata, vmatb
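# Overview of get_eph below: for each atom, build the derivative potential
# (nuclear-attraction derivative + orbital-response potential + XC derivative
# + Coulomb/exchange derivatives), stack the 3N Cartesian components,
# contract them with the frequency- and mass-weighted mode vectors, and
# optionally rotate the result into the MO basis.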
def get_eph(ephobj, mo1, omega, vec, mo_rep):
if isinstance(mo1, str):
mo1 = lib.chkfile.load(mo1, 'scf_mo1')
mo1a = mo1['0']
mo1b = mo1['1']
mo1a = dict([(int(k), mo1a[k]) for k in mo1a])
mo1b = dict([(int(k), mo1b[k]) for k in mo1b])
mol = ephobj.mol
mf = ephobj.base
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
omg, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
vnuc_deriv = ephobj.vnuc_generator(mol)
aoslices = mol.aoslice_by_atom()
mo_coeff, mo_occ = mf.mo_coeff, mf.mo_occ
vind = uhf_deriv_generator(mf, mf.mo_coeff, mf.mo_occ)
mem_now = lib.current_memory()[0]
max_memory = max(2000, mf.max_memory*.9-mem_now)
vxc1aoa, vxc1aob = _get_vxc_deriv1(ephobj, mf.mo_coeff, mf.mo_occ, max_memory)
nao, nmo = mo_coeff[0].shape
mocca = mo_coeff[0][:,mo_occ[0]>0]
moccb = mo_coeff[1][:,mo_occ[1]>0]
dm0a = np.dot(mocca, mocca.T)
dm0b = np.dot(moccb, moccb.T)
natoms = mol.natm
vcorea = []
vcoreb = []
for ia in range(natoms):
h1 = vnuc_deriv(ia)
moia = np.hstack((mo1a[ia], mo1b[ia]))
v1 = vind(moia)
shl0, shl1, p0, p1 = aoslices[ia]
shls_slice = (shl0, shl1) + (0, mol.nbas)*3
if abs(hyb)>1e-10:
vja, vjb, vka, vkb = \
rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
['ji->s2kl', -dm0a[:,p0:p1], # vja
'ji->s2kl', -dm0b[:,p0:p1], # vjb
'li->s1kj', -dm0a[:,p0:p1], # vka
'li->s1kj', -dm0b[:,p0:p1]], # vkb
shls_slice=shls_slice)
vhfa = vja + vjb - hyb * vka
vhfb = vjb + vja - hyb * vkb
if abs(omg) > 1e-10:
with mol.with_range_coulomb(omg):
vka, vkb = \
rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
['li->s1kj', -dm0a[:,p0:p1],
'li->s1kj', -dm0b[:,p0:p1]], # vk1
shls_slice=shls_slice)
vhfa -= (alpha-hyb) * vka
vhfb -= (alpha-hyb) * vkb
else:
vja, vjb = rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
['ji->s2kl', -dm0a[:,p0:p1],
'ji->s2kl', -dm0b[:,p0:p1]], # vj1
shls_slice=shls_slice)
vhfa = vhfb = vja + vjb
vtota = h1 + v1[0] + vxc1aoa[ia] + vhfa + vhfa.transpose(0,2,1)
vtotb = h1 + v1[1] + vxc1aob[ia] + vhfb + vhfb.transpose(0,2,1)
vcorea.append(vtota)
vcoreb.append(vtotb)
vcorea = np.asarray(vcorea).reshape(-1,nao,nao)
vcoreb = np.asarray(vcoreb).reshape(-1,nao,nao)
mass = mol.atom_mass_list() * MP_ME
vec = rhf_eph._freq_mass_weighted_vec(vec, omega, mass)
mata = np.einsum('xJ,xuv->Juv', vec, vcorea)
matb = np.einsum('xJ,xuv->Juv', vec, vcoreb)
if mo_rep:
mata = np.einsum('Juv,up,vq->Jpq', mata, mf.mo_coeff[0].conj(), mf.mo_coeff[0], optimize=True)
matb = np.einsum('Juv,up,vq->Jpq', matb, mf.mo_coeff[1].conj(), mf.mo_coeff[1], optimize=True)
return np.asarray([mata,matb])
class EPH(uks_hess.Hessian):
'''EPH for unrestricted DFT
Attributes:
cutoff_frequency : float or int
cutoff frequency in cm-1. Default is 80
keep_imag_frequency : bool
Whether to keep imaginary frequencies in the output. Default is False
Saved results
omega : numpy.ndarray
Vibrational frequencies in au.
vec : numpy.ndarray
Polarization vectors of the vibration modes
eph : numpy.ndarray
Electron phonon matrix eph[spin,j,a,b] (j in nmodes, a,b in norbs)
'''
def __init__(self, scf_method, cutoff_frequency=CUTOFF_FREQUENCY,
keep_imag_frequency=KEEP_IMAG_FREQUENCY):
uks_hess.Hessian.__init__(self, scf_method)
self.cutoff_frequency = cutoff_frequency
self.keep_imag_frequency = keep_imag_frequency
get_mode = rhf_eph.get_mode
get_eph = get_eph
vnuc_generator = rhf_eph.vnuc_generator
kernel = rhf_eph.kernel
if __name__ == '__main__':
from pyscf import gto, dft
mol = gto.M()
mol.atom = [['O', [0.000000000000, 0.000000002577,0.868557119905]],
['H', [0.000000000000,-1.456050381698,2.152719488376]],
['H', [0.000000000000, 1.456050379121,2.152719486067]]]
mol.unit = 'Bohr'
mol.basis = 'sto3g'
mol.verbose=4
mol.build() # this is a pre-computed relaxed geometry
mf = dft.UKS(mol)
mf.grids.level=6
mf.xc = 'b3lyp'
mf.conv_tol = 1e-16
mf.conv_tol_grad = 1e-10
mf.kernel()
grad = mf.nuc_grad_method().kernel()
print("Force on the atoms/au:")
print(grad)
myeph = EPH(mf)
(epha, ephb), omega = myeph.kernel()
from pyscf.eph.rks import EPH as REPH
mf = dft.RKS(mol)
mf.grids.level=6
mf.xc = 'b3lyp'
mf.conv_tol = 1e-16
mf.conv_tol_grad = 1e-10
mf.kernel()
myeph = REPH(mf)
rmat, omega = myeph.kernel()
print(np.linalg.norm(epha-ephb))
for i in range(len(rmat)):
print(min(np.linalg.norm(epha[i]-rmat[i]), np.linalg.norm(epha[i]+rmat[i])))
| sunqm/pyscf | pyscf/eph/uks.py | Python | apache-2.0 | 10,792 | ["PySCF"] | 2f4d295b248bb718675b7edd223dfdd9e3caa069aab8d0031157d134a37884cf |
from __future__ import annotations
import math
import random
import pytest
from dxtbx.model import ExperimentList
from scitbx import matrix
from dials.algorithms.profile_model.gaussian_rs import (
BBoxCalculator3D,
CoordinateSystem,
transform,
)
from dials.array_family import flex
def evaluate_gaussian(x, a, x0, sx):
assert len(x) == len(x0)
assert len(x) == len(sx)
g = 0.0
for xi, x0i, sxi in zip(x, x0, sx):
g += (xi - x0i) ** 2 / (2.0 * sxi**2)
return a * math.exp(-g)
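# Quick sanity check (illustrative, not one of the tests below): at x == x0
# every term of the exponent vanishes, so the amplitude is returned unchanged:
#   evaluate_gaussian((3.0, 3.0), 10.0, (3.0, 3.0), (2.0, 2.0)) == 10.0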
def gaussian(size, a, x0, sx):
result = flex.real(flex.grid(size))
index = [0 for i in range(len(size))]
while True:
result[index[::-1]] = evaluate_gaussian(index[::-1], a, x0, sx)
for j in range(len(size)):
index[j] += 1
if index[j] < size[::-1][j]:
break
index[j] = 0
if j == len(size) - 1:
return result
def test_forward(dials_data):
expt = ExperimentList.from_file(
dials_data("centroid_test_data").join("imported_experiments.json").strpath
)[0]
# Get the models
beam = expt.beam
detector = expt.detector
gonio = expt.goniometer
scan = expt.scan
# Set some parameters
sigma_divergence = 0.00101229
mosaicity = 0.157 * math.pi / 180
n_sigma = 3
grid_size = 7
delta_divergence = n_sigma * sigma_divergence
step_size = delta_divergence / grid_size
delta_divergence2 = delta_divergence + step_size * 0.5
delta_mosaicity = n_sigma * mosaicity
# Create the bounding box calculator
calculate_bbox = BBoxCalculator3D(
beam, detector, gonio, scan, delta_divergence2, delta_mosaicity
)
# Initialise the transform
spec = transform.TransformSpec(
beam, detector, gonio, scan, sigma_divergence, mosaicity, n_sigma + 1, grid_size
)
# tst_conservation_of_counts(self):
assert len(detector) == 1
s0 = beam.get_s0()
m2 = gonio.get_rotation_axis()
s0_length = matrix.col(beam.get_s0()).length()
# Create an s1 map
s1_map = transform.beam_vector_map(detector[0], beam, True)
for i in range(100):
# Get random x, y, z
x = random.uniform(300, 1800)
y = random.uniform(300, 1800)
z = random.uniform(0, 9)
# Get random s1, phi, panel
s1 = matrix.col(detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
phi = scan.get_angle_from_array_index(z, deg=False)
panel = 0
# Calculate the bounding box
bbox = calculate_bbox(s1, z, panel)
x0, x1, y0, y1, z0, z1 = bbox
# Create the coordinate system
cs = CoordinateSystem(m2, s0, s1, phi)
# The grid index generator
step_size = delta_divergence / grid_size
grid_index = transform.GridIndexGenerator(
cs, x0, y0, (step_size, step_size), grid_size, s1_map
)
# Create the image
# image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
image = gaussian(
(z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0)
)
mask = flex.bool(flex.grid(image.all()), False)
for j in range(y1 - y0):
for i in range(x1 - x0):
inside = False
gx00, gy00 = grid_index(j, i)
gx01, gy01 = grid_index(j, i + 1)
gx10, gy10 = grid_index(j + 1, i)
gx11, gy11 = grid_index(j + 1, i + 1)
mingx = min([gx00, gx01, gx10, gx11])
maxgx = max([gx00, gx01, gx10, gx11])
mingy = min([gy00, gy01, gy10, gy11])
maxgy = max([gy00, gy01, gy10, gy11])
if (
mingx >= 0
and maxgx < 2 * grid_size + 1
and mingy >= 0
and maxgy < 2 * grid_size + 1
):
inside = True
for k in range(1, z1 - z0 - 1):
mask[k, j, i] = inside
# Transform the image to the grid
transformed = transform.TransformForward(
spec, cs, bbox, 0, image.as_double(), mask
)
grid = transformed.profile()
# Get the sums and ensure they're the same
eps = 1e-7
sum_grid = flex.sum(grid)
sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
assert abs(sum_grid - sum_image) <= eps
# Test passed
# tst_transform_with_background(self):
assert len(detector) == 1
s0 = beam.get_s0()
m2 = gonio.get_rotation_axis()
s0_length = matrix.col(beam.get_s0()).length()
# Create an s1 map
s1_map = transform.beam_vector_map(detector[0], beam, True)
for i in range(100):
# Get random x, y, z
x = random.uniform(300, 1800)
y = random.uniform(300, 1800)
z = random.uniform(0, 9)
# Get random s1, phi, panel
s1 = matrix.col(detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
phi = scan.get_angle_from_array_index(z, deg=False)
panel = 0
# Calculate the bounding box
bbox = calculate_bbox(s1, z, panel)
x0, x1, y0, y1, z0, z1 = bbox
# Create the coordinate system
cs = CoordinateSystem(m2, s0, s1, phi)
# The grid index generator
step_size = delta_divergence / grid_size
grid_index = transform.GridIndexGenerator(
cs, x0, y0, (step_size, step_size), grid_size, s1_map
)
# Create the image
# image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
image = gaussian(
(z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0)
)
background = flex.random_double(len(image))
background.resize(image.accessor())
mask = flex.bool(flex.grid(image.all()), False)
for j in range(y1 - y0):
for i in range(x1 - x0):
inside = False
gx00, gy00 = grid_index(j, i)
gx01, gy01 = grid_index(j, i + 1)
gx10, gy10 = grid_index(j + 1, i)
gx11, gy11 = grid_index(j + 1, i + 1)
mingx = min([gx00, gx01, gx10, gx11])
maxgx = max([gx00, gx01, gx10, gx11])
mingy = min([gy00, gy01, gy10, gy11])
maxgy = max([gy00, gy01, gy10, gy11])
if (
mingx >= 0
and maxgx <= 2 * grid_size + 1
and mingy >= 0
and maxgy <= 2 * grid_size + 1
):
inside = True
for k in range(1, z1 - z0 - 1):
mask[k, j, i] = inside
# Transform the image to the grid
transformed = transform.TransformForward(
spec, cs, bbox, 0, image.as_double(), background.as_double(), mask
)
igrid = transformed.profile()
bgrid = transformed.background()
# Get the sums and ensure they're the same
eps = 1e-7
sum_igrid = flex.sum(igrid)
sum_bgrid = flex.sum(bgrid)
sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
sum_bkgrd = flex.sum(flex.double(flex.select(background, flags=mask)))
try:
assert abs(sum_igrid - sum_image) <= eps
assert abs(sum_bgrid - sum_bkgrd) <= eps
except Exception:
print("Failed for: ", (x, y, z))
raise
def test_forward_no_model(dials_data):
expt = ExperimentList.from_file(
dials_data("centroid_test_data").join("imported_experiments.json").strpath
)[0]
# Get the models
beam = expt.beam
detector = expt.detector
gonio = expt.goniometer
scan = expt.scan
scan.set_image_range((0, 1000))
# Set some parameters
sigma_divergence = 0.00101229
mosaicity = 0.157 * math.pi / 180
n_sigma = 3
grid_size = 20
delta_divergence = n_sigma * sigma_divergence
step_size = delta_divergence / grid_size
delta_divergence2 = delta_divergence + step_size * 0.5
delta_mosaicity = n_sigma * mosaicity
# Create the bounding box calculator
calculate_bbox = BBoxCalculator3D(
beam, detector, gonio, scan, delta_divergence2, delta_mosaicity
)
# Initialise the transform
spec = transform.TransformSpec(
beam, detector, gonio, scan, sigma_divergence, mosaicity, n_sigma + 1, grid_size
)
# tst_conservation_of_counts(self):
random.seed(0)
assert len(detector) == 1
s0 = beam.get_s0()
m2 = gonio.get_rotation_axis()
s0_length = matrix.col(beam.get_s0()).length()
# Create an s1 map
s1_map = transform.beam_vector_map(detector[0], beam, True)
for i in range(100):
# Get random x, y, z
x = random.uniform(300, 1800)
y = random.uniform(300, 1800)
z = random.uniform(500, 600)
# Get random s1, phi, panel
s1 = matrix.col(detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
phi = scan.get_angle_from_array_index(z, deg=False)
panel = 0
# Calculate the bounding box
bbox = calculate_bbox(s1, z, panel)
x0, x1, y0, y1, z0, z1 = bbox
# Create the coordinate system
cs = CoordinateSystem(m2, s0, s1, phi)
if abs(cs.zeta()) < 0.1:
continue
# The grid index generator
step_size = delta_divergence / grid_size
grid_index = transform.GridIndexGenerator(
cs, x0, y0, (step_size, step_size), grid_size, s1_map
)
# Create the image
# image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
image = gaussian(
(z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0)
)
mask = flex.bool(flex.grid(image.all()), False)
for j in range(y1 - y0):
for i in range(x1 - x0):
inside = False
gx00, gy00 = grid_index(j, i)
gx01, gy01 = grid_index(j, i + 1)
gx10, gy10 = grid_index(j + 1, i)
gx11, gy11 = grid_index(j + 1, i + 1)
mingx = min([gx00, gx01, gx10, gx11])
maxgx = max([gx00, gx01, gx10, gx11])
mingy = min([gy00, gy01, gy10, gy11])
maxgy = max([gy00, gy01, gy10, gy11])
if (
mingx >= 0
and maxgx < 2 * grid_size + 1
and mingy >= 0
and maxgy < 2 * grid_size + 1
):
inside = True
for k in range(1, z1 - z0 - 1):
mask[k, j, i] = inside
# Transform the image to the grid
transformed = transform.TransformForwardNoModel(
spec, cs, bbox, 0, image.as_double(), mask
)
grid = transformed.profile()
# Get the sums and ensure they're the same
eps = 1e-7
sum_grid = flex.sum(grid)
sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
assert abs(sum_grid - sum_image) <= eps
mask = flex.bool(flex.grid(image.all()), True)
transformed = transform.TransformForwardNoModel(
spec, cs, bbox, 0, image.as_double(), mask
)
grid = transformed.profile()
# Boost the bbox to make sure all intensity is included
x0, x1, y0, y1, z0, z1 = bbox
bbox2 = (x0 - 10, x1 + 10, y0 - 10, y1 + 10, z0 - 10, z1 + 10)
# Do the reverse transform
transformed = transform.TransformReverseNoModel(spec, cs, bbox2, 0, grid)
image2 = transformed.profile()
# Check the sum of pixels are the same
sum_grid = flex.sum(grid)
sum_image = flex.sum(image2)
assert abs(sum_grid - sum_image) <= eps
# Do the reverse transform
transformed = transform.TransformReverseNoModel(spec, cs, bbox, 0, grid)
image2 = transformed.profile()
from dials.algorithms.statistics import pearson_correlation_coefficient
cc = pearson_correlation_coefficient(image.as_1d().as_double(), image2.as_1d())
assert cc >= 0.99
# if cc < 0.99:
# print cc, bbox
# from matplotlib import pylab
# pylab.plot(image.as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
# pylab.show()
# pylab.plot(image2.as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
# pylab.show()
# pylab.plot((image.as_double()-image2).as_numpy_array()[(z1-z0)/2,(y1-y0)/2,:])
# pylab.show()
def test_forward_panel_edge(dials_data):
expt = ExperimentList.from_file(
dials_data("centroid_test_data").join("imported_experiments.json").strpath
)[0]
# Get the models
beam = expt.beam
detector = expt.detector
gonio = expt.goniometer
scan = expt.scan
# Set some parameters
sigma_divergence = 0.00101229
mosaicity = 0.157 * math.pi / 180
n_sigma = 3
grid_size = 7
delta_divergence = n_sigma * sigma_divergence
step_size = delta_divergence / grid_size
delta_divergence2 = delta_divergence + step_size * 0.5
delta_mosaicity = n_sigma * mosaicity
# Create the bounding box calculator
calculate_bbox = BBoxCalculator3D(
beam, detector, gonio, scan, delta_divergence2, delta_mosaicity
)
# Initialise the transform
spec = transform.TransformSpec(
beam, detector, gonio, scan, sigma_divergence, mosaicity, n_sigma + 1, grid_size
)
assert len(detector) == 1
s0 = beam.get_s0()
m2 = gonio.get_rotation_axis()
s0_length = matrix.col(beam.get_s0()).length()
image_size = detector[0].get_image_size()
refl_xy = [
(0, 0),
(2, 3),
(4, 1000),
(1000, 5),
(image_size[0] - 1, image_size[1] - 1),
(image_size[0] - 2, 1),
(1, image_size[1] - 5),
(1000, image_size[1] - 4),
(image_size[0] - 3, 1000),
]
for x, y in refl_xy:
z = random.uniform(0, 9)
# Get random s1, phi, panel
s1 = matrix.col(detector[0].get_pixel_lab_coord((x, y))).normalize() * s0_length
phi = scan.get_angle_from_array_index(z, deg=False)
panel = 0
# Calculate the bounding box
bbox = calculate_bbox(s1, z, panel)
x0, x1, y0, y1, z0, z1 = bbox
# Create the coordinate system
cs = CoordinateSystem(m2, s0, s1, phi)
# Create the image
image = gaussian(
(z1 - z0, y1 - y0, x1 - x0), 10.0, (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0)
)
# Mask for the foreground pixels
refl_mask = image > 1e-3
bg = flex.double(image.accessor())
# Shoebox mask, i.e. mask out pixels that are outside the panel bounds
shoebox_mask = flex.bool(image.accessor(), False)
for j in range(y1 - y0):
for i in range(x1 - x0):
if (
j + y0 >= 0
and j + y0 < image_size[1]
and i + x0 >= 0
and i + x0 < image_size[0]
):
for k in range(z1 - z0):
shoebox_mask[k, j, i] = True
mask = refl_mask & shoebox_mask
# from matplotlib import pyplot as plt
# fig, axes = plt.subplots(ncols=refl_mask.focus()[0], nrows=4)
# for i in range(refl_mask.focus()[0]):
# axes[0, i].imshow(image.as_numpy_array()[i])
# axes[1, i].imshow(refl_mask.as_numpy_array()[i])
# axes[2, i].imshow(shoebox_mask.as_numpy_array()[i])
# axes[3, i].imshow(mask.as_numpy_array()[i])
# plt.show()
# Transform the image to the grid
transformed = transform.TransformForward(
spec, cs, bbox, 0, image.as_double(), bg, refl_mask
)
grid = transformed.profile()
mask = refl_mask & shoebox_mask
# assert only pixels within the panel were transformed
assert flex.sum(grid) == pytest.approx(
flex.sum(image.select(mask.as_1d())), rel=0.01
)
# The total transformed counts should be less than the (unmasked) image counts
assert flex.sum(grid) < flex.sum(image)
# Transform the image to the grid, this time without a background
transformed = transform.TransformForward(
spec, cs, bbox, 0, image.as_double(), refl_mask
)
grid = transformed.profile()
mask = refl_mask & shoebox_mask
# assert only pixels within the panel were transformed
assert flex.sum(grid) == pytest.approx(
flex.sum(image.select(mask.as_1d())), rel=0.01
)
# The total transformed counts should be less than the (unmasked) image counts
assert flex.sum(grid) < flex.sum(image)
| dials/dials | tests/algorithms/reflection_basis/test_transform.py | Python | bsd-3-clause | 16,940 | ["Gaussian"] | a1c8a712e70b9d003b7352ab1eabb85fe6524722ca27f1bb95ddf6ea5c536c58 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Cubesphere-like netcdf saving benchmarks.
Where possible benchmarks should be parameterised for two sizes of input data:
* minimal: enables detection of regressions in parts of the run-time that do
NOT scale with data size.
* large: large enough to exclusively detect regressions in parts of the
run-time that scale with data size. Aim for benchmark time ~20x
that of the minimal benchmark.
"""
from iris import save
from iris.experimental.ugrid import save_mesh
from . import TrackAddedMemoryAllocation
from .generate_data import make_cube_like_2d_cubesphere
class NetcdfSave:
params = [[1, 600], [False, True]]
param_names = ["cubesphere-N", "is_unstructured"]
def setup(self, n_cubesphere, is_unstructured):
self.cube = make_cube_like_2d_cubesphere(
n_cube=n_cubesphere, with_mesh=is_unstructured
)
def _save_data(self, cube, do_copy=True):
if do_copy:
# Copy the cube, to avoid distorting the results by changing it
# Because we know that older Iris code realises lazy coords
cube = cube.copy()
save(cube, "tmp.nc")
def _save_mesh(self, cube):
# In this case, we are happy that the mesh is *not* modified
save_mesh(cube.mesh, "mesh.nc")
def time_netcdf_save_cube(self, n_cubesphere, is_unstructured):
self._save_data(self.cube)
def time_netcdf_save_mesh(self, n_cubesphere, is_unstructured):
if is_unstructured:
self._save_mesh(self.cube)
def track_addedmem_netcdf_save(self, n_cubesphere, is_unstructured):
cube = self.cube.copy() # Do this outside the testing block
with TrackAddedMemoryAllocation() as mb:
self._save_data(cube, do_copy=False)
return mb.addedmem_mb()
# Declare a 'Mb' unit for all 'track_addedmem_..' type benchmarks
for attr in dir(NetcdfSave):
if attr.startswith("track_addedmem_"):
getattr(NetcdfSave, attr).unit = "Mb"
| SciTools/iris | benchmarks/benchmarks/netcdf_save.py | Python | lgpl-3.0 | 2,201 | ["NetCDF"] | 0d0b8ce48524a59ff488055fb54cb073e95fa2e630eb8a3b9a192a72bc067b14 |
import unittest
from pymatgen.util.plotting import periodic_table_heatmap, van_arkel_triangle
from pymatgen.util.testing import PymatgenTest
import matplotlib
class FuncTestCase(PymatgenTest):
def test_plot_periodic_heatmap(self):
random_data = {'Te': 0.11083818874391202, 'Au': 0.7575629917425387,
'Th': 1.2475885304040335, 'Ni': -2.0354391922547705}
plt = periodic_table_heatmap(random_data, cmap="plasma")
plt = periodic_table_heatmap(random_data)
plt = periodic_table_heatmap(random_data, max_row=7)
plt = periodic_table_heatmap(random_data, max_row=10)
def test_van_arkel_triangle(self):
random_list = [("Fe", "C"), ("Ni", "F")]
plt = van_arkel_triangle(random_list)
plt = van_arkel_triangle(random_list, annotate=True)
if __name__ == "__main__":
unittest.main()
| dongsenfo/pymatgen | pymatgen/util/tests/test_plotting.py | Python | mit | 872 | ["pymatgen"] | 5b891422f704abab5eedca9d30132fd7db37cef5b5bc9e466df3ca6d88e31617 |
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2015 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import os
import sys
if len(sys.argv) != 2:
print("Usage: {0} [path-to-oslc]".format(sys.argv[0]))
sys.exit(0)
oslc_cmd = sys.argv[1]
include_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "include")
for dirpath, dirnames, filenames in os.walk("."):
for filename in filenames:
if filename.endswith(".osl"):
src_filepath = os.path.join(dirpath, filename)
dest_dir = os.path.join("..", dirpath)
dst_filename = filename.replace(".osl", ".oso")
dst_filepath = os.path.join(dest_dir, dst_filename)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
retcode = os.system("{0} -v -I{1} -o {2} {3}".format(oslc_cmd, include_dir, dst_filepath, src_filepath))
if retcode != 0:
print("Compilation of {0} failed with error code {1}. Stopping.".format(src_filepath, retcode))
sys.exit(retcode)
| haggi/appleseed-maya | module/shaders/src/compile_shaders.py | Python | mit | 2,330 | ["VisIt"] | 703aa49b17aabd3ebe510a8ac2503b4bd92d73a77b50c2d1f28da81d3c87b15c |
config = {
# environment this app is running on: localhost, testing, production
'environment': "localhost",
# webapp2 sessions
'webapp2_extras.sessions' : {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name' : "City Watchers",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang' : 'pt_BR',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales' : ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'],
'contact_sender' : "dev.citywatchers@gmail.com",
'contact_recipient' : "dev.citywatchers@gmail.com",
# Password AES Encryption Parameters
'aes_key' : "12_24_32_BYTES_KEY_FOR_PASSWORDS",
'salt' : "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key' : 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret' : 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very important: set site_url to your domain in the application settings on the Facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
'fb_secret' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
'linkedin_secret' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server' : 'github.com',
'github_redirect_uri' : 'http://www.example.com/social_login/github/complete',
'github_client_id' : 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret' : 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key' : "6Lduvt0SAAAAAPtLAk34sIc6zQD3Tu2VeL5HNDSI",
'captcha_private_key' : "6Lduvt0SAAAAACB6T9SZL-l8Wdx2MYEIOtFr2P3a",
# Leave blank "google_analytics_domain" if you only want Analytics code
'google_analytics_domain' : "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
'google_analytics_code' : "UA-XXXXX-X",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates' : {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login' : True,
# jinja2 base layout template
'base_layout' : 'base.html',
# send error emails to developers
'send_mail_developer' : True,
# fellas' list
'developers' : (
('Pedro Pimenta', 'pedro.a.m.pimenta@gmail.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email' : True,
# If true, it will write in datastore a log of every visit
'log_visit' : True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
}
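# Minimal sketch (not part of this module) of how a config dict like the one
# above is typically passed to webapp2; ``routes`` here is a hypothetical
# list of webapp2 route definitions:
#   import webapp2
#   app = webapp2.WSGIApplication(routes, config=config, debug=True)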
| rittersport3/CityWatchers | config/localhost.py | Python | lgpl-3.0 | 4,064 | ["VisIt"] | a2c320dcf0201c70bbf41f395b3895d13b42ccd3c8aa7822c77942f6da2263be |
"""
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View, RedirectView
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce.utils import audit_log
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from edx_rest_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from microsite_configuration import microsite
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from lms.djangoapps.verify_student.ssencrypt import has_valid_signature
from lms.djangoapps.verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
IcrvStatusEmailsConfiguration,
)
from lms.djangoapps.verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from util.db import outer_atomic
from xmodule.modulestore.django import modulestore
from django.contrib.staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
# Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
# If the course mode is not verified (i.e. only paid) then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
requirements = self._requirements(display_steps, request.user.is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
        # This parameter should only work for known skippable steps.
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(request.user.is_active),
'message_key': message,
'platform_name': settings.PLATFORM_NAME,
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': (
get_default_time_display(verification_deadline)
if verification_deadline else ""
),
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
'is_ab_testing': 'begin-flow' in request.path,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def _redirect_if_necessary(
self,
message,
already_verified,
already_paid,
is_enrolled,
course_key
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
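    # Editor's note (illustrative example, not from the original source): a user
    # who is enrolled but has not paid and visits the "verify now" page is sent
    # to the upgrade flow, while an unenrolled user is sent back to the start of
    # the first-time verification flow.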
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, find the first expired mode
for mode in all_modes[course_key]:
if mode.min_price > 0:
return mode
# Otherwise, return None and so the view knows to respond with a 404.
return None
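    # Editor's note (illustrative example, not from the original source): for a
    # course offering an unexpired "verified" mode priced at $100 alongside an
    # expired "professional" mode, this returns the verified mode; if only
    # expired paid modes exist, the first expired paid mode is returned instead.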
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
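    # Editor's note (illustrative example, not from the original source): a user
    # who has already paid but not verified (with always_show_payment=False) sees
    # the intro and verification steps without the payment steps, while a
    # brand-new user in a verified mode sees the payment steps (with the intro
    # step hidden) followed by the verification steps.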
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
user:
date_format: optional parameter for formatting datetime
object to string in response
Returns:
datetime object in string format
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
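    # Editor's note (illustrative example, not from the original source): an
    # active enrollment in a paid mode such as "verified" yields (True, True),
    # an active "audit" enrollment yields (False, True), and no enrollment
    # yields (False, False).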
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': (
get_default_time_display(deadline_datetime)
if deadline_datetime else ""
)
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
sku = request.POST.get('sku', None)
if sku:
try:
current_mode = CourseMode.objects.get(sku=sku)
except CourseMode.DoesNotExist:
log.exception(u'Failed to find CourseMode with SKU [%s].', sku)
if not current_mode:
        # If more than one paid mode (min_price > 0, e.g. verified/professional/no-id-professional)
        # exists for the course, choose the first one.
paid_modes = CourseMode.paid_modes_for_course(course_id)
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
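# Editor's note (illustrative, not from the original source): a typical request
# to this endpoint posts a course_id plus an optional contribution, sku, and
# processor; when the resolved course mode has a SKU the checkout is delegated
# to the E-Commerce service, otherwise the legacy shopping cart is used.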
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(transaction.non_atomic_requests)
def dispatch(self, *args, **kwargs): # pylint: disable=missing-docstring
return super(SubmitPhotosView, self).dispatch(*args, **kwargs)
@method_decorator(login_required)
@method_decorator(outer_atomic(read_committed=True))
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
"""
Associate the verification attempt with a checkpoint within a course.
Arguments:
user (User): The user making the attempt.
attempt (SoftwareSecurePhotoVerification): The verification attempt.
course_key (CourseKey): The identifier for the course.
usage_key (UsageKey): The location of the checkpoint within the course.
Returns:
VerificationCheckpoint
"""
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
checkpoint.add_verification_attempt(attempt)
VerificationStatus.add_verification_status(checkpoint, user, "submitted")
return checkpoint
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': microsite.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = microsite.get_value('default_from_email', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
context = {
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
def _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
): # pylint: disable=invalid-name
"""
Compose subject and message for photo reverification email.
Args:
course_key(CourseKey): CourseKey object
user_id(str): User Id
related_assessment_location(str): Location of reverification XBlock
        status(str): Approval status
        request(HttpRequest): Request object used to build absolute URIs
Returns:
None if any error occurred else Tuple of subject and message strings
"""
try:
usage_key = UsageKey.from_string(related_assessment_location)
reverification_block = modulestore().get_item(usage_key)
course = modulestore().get_course(course_key)
redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))
subject = "Re-verification Status"
context = {
"status": status,
"course_name": course.display_name_with_default_escaped,
"assessment": reverification_block.related_assessment
}
# Allowed attempts is 1 if not set on verification block
allowed_attempts = reverification_block.attempts + 1
used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
left_attempts = allowed_attempts - used_attempts
is_attempt_allowed = left_attempts > 0
verification_open = True
if reverification_block.due:
verification_open = timezone.now() <= reverification_block.due
context["left_attempts"] = left_attempts
context["is_attempt_allowed"] = is_attempt_allowed
context["verification_open"] = verification_open
context["due_date"] = get_default_time_display(reverification_block.due)
context['platform_name'] = settings.PLATFORM_NAME
context["used_attempts"] = used_attempts
context["allowed_attempts"] = allowed_attempts
context["support_link"] = microsite.get_value('email_from_address', settings.CONTACT_EMAIL)
re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(course_key),
related_assessment_location
)
)
context["course_link"] = request.build_absolute_uri(redirect_url)
context["reverify_link"] = request.build_absolute_uri(re_verification_link)
message = render_to_string('emails/reverification_processed.txt', context)
log.info(
"Sending email to User_Id=%s. Attempts left for this user are %s. "
"Allowed attempts %s. "
"Due Date %s",
str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
)
return subject, message
    # Catch all exceptions to avoid raising them back to the view
except: # pylint: disable=bare-except
log.exception("The email for re-verification sending failed for user_id %s", user_id)
def _send_email(user_id, subject, message):
""" Send email to given user
Args:
user_id(str): User Id
subject(str): Subject lines of emails
message(str): Email message body
Returns:
None
"""
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
user = User.objects.get(id=user_id)
user.email_user(subject, message, from_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
"""Sets the status of a credit requirement for the user,
based on a verification checkpoint.
"""
checkpoint = None
try:
checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
except VerificationCheckpoint.DoesNotExist:
log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
if checkpoint is not None:
try:
set_credit_requirement_status(
attempt.user.username,
checkpoint.course_id,
namespace,
checkpoint.checkpoint_location,
status=status,
reason=reason,
)
except Exception: # pylint: disable=broad-except
# Catch exception if unable to add credit requirement
# status for user
log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
_set_user_requirement_status(attempt, 'reverification', 'satisfied')
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
_set_user_requirement_status(
attempt, 'reverification', 'failed', json.dumps(reason)
)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)
# Trigger ICRV email only if ICRV status emails config is enabled
icrv_status_emails = IcrvStatusEmailsConfiguration.current()
if icrv_status_emails.enabled and checkpoints:
user_id = attempt.user.id
course_key = checkpoints[0].course_id
related_assessment_location = checkpoints[0].checkpoint_location
subject, message = _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
)
_send_email(user_id, subject, message)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, _ = SoftwareSecurePhotoVerification.user_status(request.user)
        # Allow the user to submit photos if there is no initial verification,
        # if the previous verification was denied ('must_reverify'), if it has
        # expired, or if it is still pending.
        # A photo verification is considered 'pending' if its status is either
        # 'submitted' or 'must_retry'.
if status in ["none", "must_reverify", "expired", "pending"]:
context = {
"user_full_name": request.user.profile.name,
"platform_name": settings.PLATFORM_NAME,
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
class InCourseReverifyView(View):
"""
The in-course reverification view.
In-course reverification occurs while a student is taking a course.
At points in the course, students are prompted to submit face photos,
which are matched against the ID photos the user submitted during their
initial verification.
Students are prompted to enter this flow from an "In Course Reverification"
XBlock (courseware component) that course authors add to the course.
See https://github.com/edx/edx-reverification-block for more details.
"""
@method_decorator(login_required)
def get(self, request, course_id, usage_id):
"""Display the view for face photo submission.
Args:
request(HttpRequest): HttpRequest object
course_id(str): A string of course id
usage_id(str): Location of Reverification XBlock in courseware
Returns:
HttpResponse
"""
user = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if course is None:
log.error(u"Could not find course '%s' for in-course reverification.", course_key)
raise Http404
try:
checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
except VerificationCheckpoint.DoesNotExist:
log.error(
u"No verification checkpoint exists for the "
u"course '%s' and checkpoint location '%s'.",
course_key, usage_id
)
raise Http404
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
if not initial_verification:
return self._redirect_to_initial_verification(user, course_key, usage_id)
# emit the reverification event
self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)
context = {
'course_key': unicode(course_key),
'course_name': course.display_name_with_default_escaped,
'checkpoint_name': checkpoint.checkpoint_name,
'platform_name': settings.PLATFORM_NAME,
'usage_id': usage_id,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/incourse_reverify.html", context)
def _track_reverification_events(self, event_name, user_id, course_id, checkpoint):
"""Track re-verification events for a user against a reverification
checkpoint of a course.
Arguments:
event_name (str): Name of event being tracked
user_id (str): The ID of the user
course_id (unicode): ID associated with the course
checkpoint (str): Checkpoint name
Returns:
None
"""
log.info(
u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
event_name, user_id, course_id, checkpoint
)
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': "verification",
'label': unicode(course_id),
'checkpoint': checkpoint
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _redirect_to_initial_verification(self, user, course_key, checkpoint):
"""
Redirect because the user does not have an initial verification.
We will redirect the user to the initial verification flow,
passing the identifier for this checkpoint. When the user
submits a verification attempt, it will count for *both*
the initial and checkpoint verification.
Arguments:
user (User): The user who made the request.
course_key (CourseKey): The identifier for the course for which
the user is attempting to re-verify.
checkpoint (string): Location of the checkpoint in the courseware.
Returns:
HttpResponse
"""
log.info(
u"User %s does not have an initial verification, so "
u"he/she will be redirected to the \"verify later\" flow "
u"for the course %s.",
user.id, course_key
)
base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
params = urllib.urlencode({"checkpoint": checkpoint})
full_url = u"{base}?{params}".format(base=base_url, params=params)
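        # Editor's note (illustrative, not from the original source): the result
        # is the "verify now" URL for the course with the checkpoint location
        # appended as a query string, e.g. ...?checkpoint=<usage_id>, depending
        # on how 'verify_student_verify_now' is routed.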
return redirect(full_url)
|
wwj718/edx-platform
|
lms/djangoapps/verify_student/views.py
|
Python
|
agpl-3.0
| 59,314
|
[
"VisIt"
] |
a9de5496446e43f9d46a94a5d50f80cd8d640715a7b34e9e4cf987b9cc2d1cd0
|
from contextlib import contextmanager
import os
import sys
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem.ChemUtils.SDFToCSV import Convert, initParser
from io import StringIO
class TestCase(unittest.TestCase):
def test1(self):
fName = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.props.sdf')
suppl = Chem.SDMolSupplier(fName)
io = StringIO()
try:
Convert(suppl, io)
except Exception:
import traceback
traceback.print_exc()
self.fail('conversion failed')
txt = io.getvalue()
lines = txt.split('\n')
if not lines[-1]:
del lines[-1]
self.assertTrue(len(lines) == 201, 'bad num lines: %d' % len(lines))
line0 = lines[0].split(',')
self.assertEqual(len(line0), 20)
self.assertTrue(line0[0] == 'SMILES')
def test2(self):
fName = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.props.sdf')
suppl = Chem.SDMolSupplier(fName)
io = StringIO()
try:
Convert(suppl, io, keyCol='AMW', stopAfter=5)
except Exception:
import traceback
traceback.print_exc()
self.fail('conversion failed')
txt = io.getvalue()
lines = [line for line in txt.split('\n') if line.strip() != '']
self.assertTrue(len(lines) == 6, 'bad num lines: %d' % len(lines))
line0 = lines[0].split(',')
self.assertEqual(len(line0), 20)
self.assertTrue(line0[0] == 'AMW')
self.assertTrue(line0[1] == 'SMILES')
def test_parser(self):
parser = initParser()
    # User wants help
with self.assertRaises(SystemExit), outputRedirect() as (out, err):
parser.parse_args(['-h'])
self.assertNotEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
# Missing input file
with self.assertRaises(SystemExit), outputRedirect() as (out, err):
parser.parse_args([])
self.assertEqual(out.getvalue(), '')
self.assertNotEqual(err.getvalue(), '')
# Input file doesn't exist
with self.assertRaises(SystemExit), outputRedirect() as (out, err):
parser.parse_args(['incorrectFilename'])
self.assertEqual(out.getvalue(), '')
self.assertNotEqual(err.getvalue(), '')
@contextmanager
def outputRedirect():
""" Redirect standard output and error to String IO and return """
try:
_stdout, _stderr = sys.stdout, sys.stderr
sys.stdout = sStdout = StringIO()
sys.stderr = sStderr = StringIO()
yield (sStdout, sStderr)
finally:
sys.stdout, sys.stderr = _stdout, _stderr
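# Editor's note (illustrative, not from the original source): typical usage is
#   with outputRedirect() as (out, err):
#       parser.parse_args(['-h'])
# the original streams are restored in the finally block even if the body raises.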
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/Chem/ChemUtils/UnitTestSDFToCSV.py
|
Python
|
bsd-3-clause
| 2,817
|
[
"RDKit"
] |
bff420244220ef4988d9565002cc5744eb53153f37de46cb136127d440b88b56
|
#!/usr/bin/python
"""
prepare_data - auxiliary script aiming to prepare the data structure to
reconstruct a number of gene trees i.e. a phylome
Copyright (C) 2016 - Salvador Capella-Gutierrez, Toni Gabaldon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
## To guarantee compatibility with python3.4
from __future__ import print_function
desc = """
--
prepare_data - Copyright (C) 2016 Salvador Capella-Gutierrez
[salcagu_at_gmail.com], Toni Gabaldon [tgabaldon_at_crg.es]
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it
under certain conditions;
--
Auxiliary script to prepare the data structure for executing a given
phylogenetic pipeline several times
"""
import os
import sys
import shutil
import argparse
from Bio import SeqIO
from module_utils import splitSequence, lookForFile, lookForDirectory
from version import __version__, __revision__, __build__
__version = ("v%s rev:%s [BUILD:%s]") % (__version__, __revision__, __build__)
if __name__ == "__main__":
usage = ("\n\npython %(prog)s --db sequences_file --folder ROOT_folder "
+ "--config config_file --script PHYLOMIZER script path [other_options]\n")
parser = argparse.ArgumentParser(description = desc, usage = usage,
formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument("--folder", dest = "outDir", type = str, default = ".",
help = "Set the ROOT directory for the whole data structure")
parser.add_argument("--size", dest = "dirSize", type = int, default = 1000,
help = "Set the number of seed proteins per subfolder in the DATA folder")
  ## Set the species code to be detected from the input database. It can be either
  ## the first three letters of the sequence ID, the tag after the first "_", or
  ## empty to take all sequences in the input file
parser.add_argument("--seed_sp", dest = "seed", type = str, default = "",
help = "Species TAG to detect the seed species in the sequences database. "
+ "It could be \n1) the first 3 letter of each sequence, 2) the TAG after "
+ "the 1st \"_\", or 3) it could be empty to take all input sequences")
parser.add_argument("--script", dest = "script", required = True, type = str,
help = "Set the path for the pipeline.py script")
parser.add_argument("--interpreter", dest = "python", default = "python", \
type = str, help = "Set the path to the PYTHON interpreter")
## Set the same parameters for the phylomizer script. On this way, a command-
## line with all parameters will be generated by this auxiliary script
parser.add_argument("--min_seqs", dest = "minSeqs", type = str, default= None,
help = "Set the minimum sequences number to reconstruct an alignment/tree."
+ "\nThis parameter overwrites whatever is set on the config file.")
parser.add_argument("--max_hits", dest = "maxHits", type = str, default= None,
help = "Set the maximum accepted homology hits after filtering for e-value/"
+ "coverage.\nThis parameter overwrites whatever is set on the config file.")
parser.add_argument("-p", "--prefix", dest = "prefix", type = str, default = \
"", help = "Set the prefix for all output files generated by the pipeline")
parser.add_argument("-r", "--replace", dest = "replace", default = False, \
action = "store_true", help = "Over-write any previously generated file")
parser.add_argument("--no_force_seed", dest = "forcedSeed", default = True, \
action = "store_false", help = "Avoid forcing the inclusion of the sequence"
+ " used for the homology search\nThis parameter overwrites whatever is set"
+ "on the config file")
## Some files will be copied to the data structure for ensuring all data is
## stored at the same ROOT directory
parser.add_argument("-c", "--config", dest = "configFile", default = None, \
type = str, help = "Input configuration file")
parser.add_argument("-d", "--db", dest = "dbFile", type = str, default = None,
help = "Input file containing the target sequence database")
parser.add_argument("--cds", dest = "cdsFile", type = str, default = None,
help = "Input file containing CDS corresponding to input protein seqs")
parser.add_argument("--copy", dest = "copy", action = "store_false", default \
= True, help = "Avoid copying database and configuration files to the ROOT "
+ "folder")
## If no arguments are given, just show the help and finish
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
## Check whether the ROOT directory already exist or not ...
if lookForDirectory(args.outDir, False):
sys.exit(("ERROR: Output ROOT folder already exist '%s'") % (args.outDir))
args.outDir = os.path.abspath(args.outDir)
## ... and try to create it in case it doesn't exist
if not lookForDirectory(args.outDir, create = True):
sys.exit(("ERROR: ROOT folder '%s' cannot be created") % (args.outDir))
## Create folders to store the jobs file and (potentially) the configuration
## file and input databases
lookForDirectory(os.path.join(args.outDir, "jobs"))
lookForDirectory(os.path.join(args.outDir, "Data"))
lookForDirectory(os.path.join(args.outDir, "BlastDB"))
## Check parameters related to files / directories
if not lookForFile(os.path.abspath(args.script)):
sys.exit(("ERROR: Check input SCRIPT file '%s'") % (args.script))
args.script = os.path.abspath(args.script)
## Databases and configuration files will be, by default, copied into the new
## data structure. It will guarantee to have everything under the same ROOT
## folder
if not lookForFile(os.path.abspath(args.configFile)):
sys.exit(("ERROR: Check input CONFIG file '%s'") % (args.configFile))
args.configFile = os.path.abspath(args.configFile)
config = ("%s/jobs/%s") % (args.outDir, os.path.split(args.configFile)[1]) \
if args.copy else args.configFile
if not lookForFile(os.path.abspath(args.dbFile)):
sys.exit(("ERROR: Check input TARGET SEQUENCES file '%s'") % (args.dbFile))
args.dbFile = os.path.abspath(args.dbFile)
db = ("%s/BlastDB/%s") % (args.outDir, os.path.split(args.dbFile)[1]) if \
args.copy else args.dbFile
cds = None
if args.cdsFile:
if not lookForFile(os.path.abspath(args.cdsFile)):
sys.exit(("ERROR: Check input CDS file '%s'") % (args.cdsFile))
args.cdsFile = os.path.abspath(args.cdsFile)
cds = ("%s/BlastDB/%s") % (args.outDir, os.path.split(args.cdsFile)[1]) \
if args.copy else args.cdsFile
## Check some additional parameters
if args.dirSize < 1:
sys.exit(("ERROR: Check your subfolder DATA size \"%d\"") % (args.dirSize))
## Read input BLAST DB file and check whether predefined seed species
## is in the database
proteome = {}
for record in SeqIO.parse(args.dbFile, "fasta"):
sp = record.id.split("_")[1] if record.id.find("_") != -1 else record.id[:3]
## If there is no TAG, take any sequence. Otherwise, try to detect the
    ## species TAG either from the first three letters of the sequence ID or from
## the tag after the 1st "_"
if args.seed and args.seed != sp:
continue
## Remove STOP codons located at the sequence last position
seq = str(record.seq[:-1] if str(record.seq)[-1] == "*" else record.seq)
proteome.setdefault(record.id, splitSequence(seq))
## Check whether there are sequences for the seed species
if len(proteome) == 0:
sys.exit(("\nERROR: Check Species TAG '%s'. No sequences detected") % \
      (args.seed))
total = "{:,}".format(len(proteome))
## Generate a master command-line which will be later added the input FASTA
## file and the output directory
master_cmd = ("%s %s --db %s --config ") % (args.python, args.script, db)
master_cmd += ("%s%s") % (config, (" --cds %s") % (cds) if cds else "")
master_cmd += (" --min_seqs %s") % (args.minSeqs) if args.minSeqs else ""
master_cmd += (" --max_hits %s") % (args.maxHits) if args.maxHits else ""
master_cmd += (" --prefix %s") % (args.prefix) if args.prefix else ""
master_cmd += (" --no_force_seed") if not args.forcedSeed else ""
master_cmd += (" --replace") if args.replace else ""
n = 0
data_folder = os.path.join(args.outDir, "Data")
jobsFile = open(os.path.join(args.outDir, "jobs/jobs.pipeline"), "w")
## Dump sequences in the output directory
for record in sorted(proteome):
    ## Create a subdirectory every 'args.dirSize' sequences.
if (n % args.dirSize) == 0:
cDir = ("%s-%s") % (str(n + 1).zfill(5), str(n + args.dirSize).zfill(5))
if n > 0:
print (("INFO: Already processed %s/%s") % ("{:,}".format(n), total), \
file = sys.stderr)
## Get specific sequence folder
current = os.path.join(os.path.join(data_folder, cDir), record)
lookForDirectory(current)
## Create FASTA file containing the sequence
inFile = os.path.join(current, ("%s.fasta") % (record))
oFile = open(inFile, "w")
print ((">%s\n%s") % (record, proteome[record]), file = oFile)
oFile.close()
del proteome[record]
print (("%s --in %s --out %s") % (master_cmd, inFile, current), file = \
jobsFile)
## Increase counter to ensure there are only 'args.dirSize' sequences
## for each folder
n += 1
jobsFile.close()
ref = os.path.join(args.outDir, "jobs/jobs.pipeline")
print (("INFO: Already processed %s/%s\n---") % ("{:,}".format(n), total), \
file = sys.stderr)
print (("INFO: Jobs have been dumped into '%s'") % (ref), file = sys.stderr)
print (("---\nINFO: Before running the pipeline, make sure you have formatted"
+ " your sequences database by using appropriate tools e.g. formatdb"), \
file = sys.stderr)
## Just copy databases and configuration files to the ROOT project folder
if args.copy:
shutil.copy2(args.dbFile, db)
shutil.copy2(args.configFile, config)
if cds:
shutil.copy2(args.cdsFile, cds)
|
Gabaldonlab/phylomizer
|
source/prepare_data.py
|
Python
|
gpl-3.0
| 10,555
|
[
"BLAST"
] |
f02dd2a6a22fa4d07442e6f23c07e277e36163de48816b9e6ded64635f08e07e
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Consumer class that builds a Structure object.
This is used by the PDBParser and MMCIFparser classes.
"""
import warnings
# SMCRA hierarchy
from Bio.PDB.Structure import Structure
from Bio.PDB.Model import Model
from Bio.PDB.Chain import Chain
from Bio.PDB.Residue import Residue, DisorderedResidue
from Bio.PDB.Atom import Atom, DisorderedAtom
from Bio.PDB.PDBExceptions import \
PDBConstructionException, PDBConstructionWarning
class StructureBuilder(object):
"""
    Deals with constructing the Structure object. The StructureBuilder class is used
by the PDBParser classes to translate a file to a Structure object.
"""
def __init__(self):
self.line_counter=0
self.header={}
def _is_completely_disordered(self, residue):
"Return 1 if all atoms in the residue have a non blank altloc."
atom_list=residue.get_unpacked_list()
for atom in atom_list:
altloc=atom.get_altloc()
if altloc==" ":
return 0
return 1
# Public methods called by the Parser classes
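    # Editor's note (illustrative, not from the original source): a parser
    # typically calls init_structure once, then init_model / init_chain /
    # init_residue / init_atom while walking the file, and finally
    # get_structure() to retrieve the populated Structure object.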
def set_header(self, header):
self.header=header
def set_line_counter(self, line_counter):
"""
The line counter keeps track of the line in the PDB file that
is being parsed.
Arguments:
o line_counter - int
"""
self.line_counter=line_counter
def init_structure(self, structure_id):
"""Initiate a new Structure object with given id.
Arguments:
o id - string
"""
self.structure=Structure(structure_id)
def init_model(self, model_id, serial_num = None):
"""Initiate a new Model object with given id.
Arguments:
o id - int
o serial_num - int
"""
self.model=Model(model_id,serial_num)
self.structure.add(self.model)
def init_chain(self, chain_id):
"""Initiate a new Chain object with given id.
Arguments:
o chain_id - string
"""
if self.model.has_id(chain_id):
self.chain=self.model[chain_id]
warnings.warn("WARNING: Chain %s is discontinuous at line %i."
% (chain_id, self.line_counter),
PDBConstructionWarning)
else:
self.chain=Chain(chain_id)
self.model.add(self.chain)
def init_seg(self, segid):
"""Flag a change in segid.
Arguments:
o segid - string
"""
self.segid=segid
def init_residue(self, resname, field, resseq, icode):
"""
Initiate a new Residue object.
Arguments:
o resname - string, e.g. "ASN"
o field - hetero flag, "W" for waters, "H" for
hetero residues, otherwise blank.
o resseq - int, sequence identifier
o icode - string, insertion code
"""
if field!=" ":
if field=="H":
# The hetero field consists of H_ + the residue name (e.g. H_FUC)
field="H_"+resname
res_id=(field, resseq, icode)
if field==" ":
if self.chain.has_id(res_id):
# There already is a residue with the id (field, resseq, icode).
# This only makes sense in the case of a point mutation.
warnings.warn("WARNING: Residue ('%s', %i, '%s') "
"redefined at line %i."
% (field, resseq, icode, self.line_counter),
PDBConstructionWarning)
duplicate_residue=self.chain[res_id]
if duplicate_residue.is_disordered()==2:
# The residue in the chain is a DisorderedResidue object.
# So just add the last Residue object.
if duplicate_residue.disordered_has_id(resname):
# The residue was already made
self.residue=duplicate_residue
duplicate_residue.disordered_select(resname)
else:
# Make a new residue and add it to the already
# present DisorderedResidue
new_residue=Residue(res_id, resname, self.segid)
duplicate_residue.disordered_add(new_residue)
self.residue=duplicate_residue
return
else:
# Make a new DisorderedResidue object and put all
# the Residue objects with the id (field, resseq, icode) in it.
# These residues each should have non-blank altlocs for all their atoms.
# If not, the PDB file probably contains an error.
if not self._is_completely_disordered(duplicate_residue):
# if this exception is ignored, a residue will be missing
self.residue=None
raise PDBConstructionException(\
"Blank altlocs in duplicate residue %s ('%s', %i, '%s')" \
% (resname, field, resseq, icode))
self.chain.detach_child(res_id)
new_residue=Residue(res_id, resname, self.segid)
disordered_residue=DisorderedResidue(res_id)
self.chain.add(disordered_residue)
disordered_residue.disordered_add(duplicate_residue)
disordered_residue.disordered_add(new_residue)
self.residue=disordered_residue
return
residue=Residue(res_id, resname, self.segid)
self.chain.add(residue)
self.residue=residue
def init_atom(self, name, coord, b_factor, occupancy, altloc, fullname,
serial_number=None, element=None):
"""
Initiate a new Atom object.
Arguments:
o name - string, atom name, e.g. CA, spaces should be stripped
o coord - Numeric array (Float0, size 3), atomic coordinates
o b_factor - float, B factor
o occupancy - float
o altloc - string, alternative location specifier
o fullname - string, atom name including spaces, e.g. " CA "
o element - string, upper case, e.g. "HG" for mercury
"""
residue=self.residue
# if residue is None, an exception was generated during
# the construction of the residue
if residue is None:
return
# First check if this atom is already present in the residue.
# If it is, it might be due to the fact that the two atoms have atom
# names that differ only in spaces (e.g. "CA.." and ".CA.",
# where the dots are spaces). If that is so, use all spaces
# in the atom name of the current atom.
if residue.has_id(name):
duplicate_atom=residue[name]
# atom name with spaces of duplicate atom
duplicate_fullname=duplicate_atom.get_fullname()
if duplicate_fullname!=fullname:
# name of current atom now includes spaces
name=fullname
warnings.warn("Atom names %r and %r differ "
"only in spaces at line %i."
% (duplicate_fullname, fullname,
self.line_counter),
PDBConstructionWarning)
atom=self.atom=Atom(name, coord, b_factor, occupancy, altloc,
fullname, serial_number, element)
if altloc!=" ":
# The atom is disordered
if residue.has_id(name):
# Residue already contains this atom
duplicate_atom=residue[name]
if duplicate_atom.is_disordered()==2:
duplicate_atom.disordered_add(atom)
else:
# This is an error in the PDB file:
# a disordered atom is found with a blank altloc
# Detach the duplicate atom, and put it in a
# DisorderedAtom object together with the current
# atom.
residue.detach_child(name)
disordered_atom=DisorderedAtom(name)
residue.add(disordered_atom)
disordered_atom.disordered_add(atom)
disordered_atom.disordered_add(duplicate_atom)
residue.flag_disordered()
warnings.warn("WARNING: disordered atom found "
"with blank altloc before line %i.\n"
% self.line_counter,
PDBConstructionWarning)
else:
# The residue does not contain this disordered atom
# so we create a new one.
disordered_atom=DisorderedAtom(name)
residue.add(disordered_atom)
# Add the real atom to the disordered atom, and the
# disordered atom to the residue
disordered_atom.disordered_add(atom)
residue.flag_disordered()
else:
# The atom is not disordered
residue.add(atom)
def set_anisou(self, anisou_array):
"Set anisotropic B factor of current Atom."
self.atom.set_anisou(anisou_array)
def set_siguij(self, siguij_array):
"Set standard deviation of anisotropic B factor of current Atom."
self.atom.set_siguij(siguij_array)
def set_sigatm(self, sigatm_array):
"Set standard deviation of atom position of current Atom."
self.atom.set_sigatm(sigatm_array)
def get_structure(self):
"Return the structure."
# first sort everything
# self.structure.sort()
# Add the header dict
self.structure.header=self.header
return self.structure
def set_symmetry(self, spacegroup, cell):
pass
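# Minimal usage sketch: drives the builder by hand, assuming the surrounding
# class is the usual StructureBuilder with init_structure, init_model,
# init_chain, init_seg and init_residue defined earlier (not shown above);
# all identifiers and coordinates below are placeholder values.
if __name__ == "__main__":
    import numpy
    builder = StructureBuilder()
    builder.init_structure("demo")
    builder.init_model(0)
    builder.init_chain("A")
    builder.init_seg("    ")
    builder.init_residue("GLY", " ", 1, " ")
    # name, coord, b_factor, occupancy, altloc, fullname (cf. init_atom above)
    builder.init_atom("CA", numpy.array([0.0, 0.0, 0.0], "f"), 10.0, 1.0,
                      " ", " CA ", element="C")
    print(builder.get_structure())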
|
bryback/quickseq
|
genescript/Bio/PDB/StructureBuilder.py
|
Python
|
mit
| 10,408
|
[
"Biopython"
] |
cc6e5cb5bbfb190890dd0e0da0a053e5b83a71c568e2070bd600af734cdaaa1d
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGtrellis(RPackage):
"""Genome level Trellis graph visualizes genomic data conditioned by
genomic categories (e.g. chromosomes). For each genomic category,
multiple dimensional data which are represented as tracks describe
different features from different aspects. This package provides high
flexibility to arrange genomic categories and to add self-defined
graphics in the plot."""
homepage = "https://bioconductor.org/packages/gtrellis/"
url = "https://git.bioconductor.org/packages/gtrellis"
list_url = homepage
version('1.8.0', git='https://git.bioconductor.org/packages/gtrellis', commit='f813b420a008c459f63a2a13e5e64c5507c4c472')
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-circlize', type=('build', 'run'))
depends_on('r-getoptlong', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.8.0')
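    # Usage note: once this repository is registered with Spack, the package
    # can typically be installed with a spec such as
    # `spack install r-gtrellis@1.8.0`; the concretizer resolves the
    # depends_on() constraints above automatically.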
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-gtrellis/package.py
|
Python
|
lgpl-2.1
| 2,224
|
[
"Bioconductor"
] |
58554cee863752814abd57fbd7bdb589e4bde4e57b2014e12cafe40802a88ea6
|
"""
@created_at 2014-07-17
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
Based on the work of Juan Bekios-Calfa <juan.bekios@ucn.cl>
"""
# The following standards are recommended:
# 1. For coding style: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import numpy as np
from naive_bayes_classifier_exception import NaiveBayesClassifierException
class NaiveBayesClassifier:
"""Esta clase abstracta define los metodos que deben ser implementados por un
clasificador bayesiano binario.
"""
def fit(self, training_set, training_set_classes):
"""Este metodo entrena el clasificador bayesiano.
:param training_set: Conjunto de valores de ejemplos de entrenamiento.
:param training_set_classes: Conjunto de clases a las que pertenecen los ejemplos de entrenamiento.
"""
raise NotImplementedError
def predict(self, testing_set):
"""Este metodo utiliza el clasificador ya entrenado para clasificar un conjunto de datos de prueba.
:param testing_set: Conjunto de valores de prueba.
:returns Conjunto de valores de clases asociados a cada elemento del conjunto de prueba.
"""
raise NotImplementedError
def score(self, testing_set, testing_set_classes):
"""Este metodo calcula la precision que tiene el clasificador sobre un conjunto de datos de prueba.
:param testing_set: Conjunto de valores de prueba.
:param training_set_classes: Conjunto de clases a las que pertenecen los datos de prueba.
:returns La precision como un valor entre 0 y 1.
"""
raise NotImplementedError
class FKNaiveBayesClassifier(NaiveBayesClassifier):
"""Esta clase implementa los metodos de una clasificador bayesiano.
"""
def __init__(self):
"""Crea una instancia de la clase FKNaiveBayesClassifier.
"""
self._var_positive = None
self._var_negative = None
self._p_positive = None
self._p_negative = None
self._mu_positive = None
self._mu_negative = None
def fit(self, training_set, training_set_classes):
        # Split the positive examples from the negative ones.
lda_data_positive = training_set[training_set_classes == 1]
lda_data_negative = training_set[training_set_classes == 0]
        # Estimate the means.
self._mu_positive = np.mean(lda_data_positive)
self._mu_negative = np.mean(lda_data_negative)
        # Estimate the variances.
self._var_positive = np.var(lda_data_positive)
self._var_negative = np.var(lda_data_negative)
        # Estimate the prior probability (p_negative is obtained as the complement).
self._p_positive = float(len(lda_data_positive)) / len(training_set)
self._p_negative = 1 - self._p_positive
def predict(self, testing_set):
        # Initialize the variables required by the classifier.
n = len(testing_set)
y_predicted = [None] * n
        # Positive class.
log_p_positive = np.log(self._p_positive)
pdf_positive = - 0.5 * np.sum(np.log(np.pi * self._var_positive))
pdf_positive -= 0.5 * np.sum(((testing_set - self._mu_positive) ** 2) /
self._var_positive, 1)
positive_discriminant = log_p_positive + pdf_positive
        # Negative class.
log_p_negative = np.log(self._p_negative)
pdf_negative = - 0.5 * np.sum(np.log(np.pi * self._var_negative))
pdf_negative -= 0.5 * np.sum(((testing_set - self._mu_negative) ** 2) /
self._var_negative, 1)
negative_discriminant = log_p_negative + pdf_negative
        # Return the set of predictions for each test case.
return [int(i) for i in positive_discriminant > negative_discriminant]
def score(self, testing_set, testing_set_classes):
        # Compute the accuracy.
testing_pred = self.predict(testing_set)
mislabeled_points = (testing_pred != testing_set_classes).sum()
score = 1 - float(mislabeled_points) / len(testing_set)
        # Return the computed value.
return score
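# Minimal usage sketch on synthetic data (all values below are placeholders).
# predict() sums over axis 1, so a 2-D (n_samples, n_features) array is
# expected; note that fit() pools the means/variances into single scalars.
if __name__ == "__main__":
    np.random.seed(0)
    X_pos = np.random.normal(1.0, 1.0, (50, 2))
    X_neg = np.random.normal(-1.0, 1.0, (50, 2))
    X = np.vstack([X_pos, X_neg])
    y = np.concatenate([np.ones(50), np.zeros(50)])
    clf = FKNaiveBayesClassifier()
    clf.fit(X, y)
    print("accuracy: %.2f" % clf.score(X, y))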
|
efulet/pca
|
pca/lib/naive_bayes_classifier.py
|
Python
|
mit
| 4,417
|
[
"Brian"
] |
9574c089fa70b012127e35e5f4a14abf7bb940cc0241014fe9ac8ba8297835d1
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 12:45:40 2015
@author: miles
"""
import numpy as np
from utils import *
from cochlea_model import *
from brian.hears import LinearFilterbank
from brian.stdunits import kHz, Hz, ms
def LowPass_filter_(fc,fs):
TWOPI = 2*np.pi
c = 2.0 * fs
c1LP = ( c - TWOPI*fc ) / ( c + TWOPI*fc )
c2LP = TWOPI*fc / (TWOPI*fc + c)
b = np.array([c2LP,c2LP])
a = np.array([1,-c1LP])
return b,a
def filter_coeff_one2n(a,n):
import sympy
import numpy as np
x = sympy.symbols("x")
formula = (a[0] + a[1]*x) ** n
newa = formula.expand().as_poly().all_coeffs()
return np.array(np.float64(newa[::-1]))
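# Note: LowPass_filter_ returns first-order (b, a) coefficients and
# filter_coeff_one2n raises each coefficient polynomial to the n-th power,
# so together they give an n-th order IIR low-pass, as used in ihc() below.
# A minimal sketch (cut-off and sample rate are example values only):
#   b, a = LowPass_filter_(3000.0, 100e3)
#   b7, a7 = filter_coeff_one2n(b, 7), filter_coeff_one2n(a, 7)
#   y = scipy.signal.lfilter(b7, a7, x)   # x: input waveform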
class LowPass_filter(LinearFilterbank):
def __init__(self,source,cf,fc,gain,order):
nch = len(cf)
TWOPI = 2*np.pi
self.samplerate = source.samplerate
c = 2.0 * self.samplerate
c1LP = ( c/Hz - TWOPI*fc ) / ( c/Hz + TWOPI*fc )
c2LP = TWOPI*fc/Hz / (TWOPI*fc + c/Hz)
b_temp = np.array([c2LP,c2LP])
a_temp = np.array([1,-c1LP])
filt_b = np.tile(b_temp.reshape([2,1]),[nch,1,order])
filt_a = np.tile(a_temp.reshape([2,1]),[nch,1,order])
filt_b[:,:,0] = filt_b[:,:,0]*gain
LinearFilterbank.__init__(self, source, filt_b, filt_a)
def IHC_transduction(x,slope,asym,sign):
corner = 80
strength = 20.0e6/10**(corner/20)
xx = sign*np.log(1.0+strength*abs(x))*slope
ind = x<0
    splx = 20*np.log10(-x[ind]/20e-6)
    asym_t = asym-(asym-1)/(1+np.exp(splx/5.0))
xx[ind] = -1/asym_t*xx[ind]
return xx
def zilany2014_run_synapse(vihc,ff,args,channels):
from cochlea.zilany2014 import _zilany2014
fs = args['fs']
powerlaw = args['powerlaw']
anf_num = args['anf_num']
ffGn = args['ffGn']
duration = vihc.shape[0] / fs
anf_types = np.repeat(['hsr', 'msr', 'lsr'], anf_num)
nested = []
for i in channels:
cf = ff[i]
vihc_canal = vihc[:,i]
synout = {}
trains = []
for anf_type in anf_types:
if anf_type not in synout:
### Run synapse
synout[anf_type] = _zilany2014.run_synapse(
fs=fs,
vihc=vihc_canal,
cf=cf,
anf_type=anf_type,
powerlaw=powerlaw,
ffGn=ffGn
)
### Run spike generator
spikes = _zilany2014.run_spike_generator(
synout=synout[anf_type],
fs=fs,
)
trains.append({
'spikes': spikes,
'duration': duration,
'cf': cf,
'type': anf_type
})
nested.append(trains)
return synout,nested
def synapse(vihc,ff,args,channels):
from cochlea.zilany2014 import _zilany2014
fs = args['fs']
powerlaw = args['powerlaw']
ffGn = args['ffGn']
duration = vihc.shape[0] / fs
anf_types = ['hsr', 'msr', 'lsr']
synout = {}
for anf_type in anf_types:
synout[anf_type ] = np.zeros((len(channels),vihc.shape[0]))
for i in channels:
cf = ff[i]
vihc_canal = vihc[:,i]
for anf_type in anf_types:
syn = _zilany2014.run_synapse(
fs=fs,
vihc=vihc_canal,
cf=cf,
anf_type=anf_type,
powerlaw=powerlaw,
ffGn=ffGn
)
synout[anf_type ][i,:] = syn
return synout
def ihc(Y,fs,Yscale=1e2,fcut=3000):
from scipy.signal import lfilter
blop,alop = LowPass_filter_(fcut,fs)
alop = filter_coeff_one2n(alop,7)
blop = filter_coeff_one2n(blop,7)
Yihc = IHC_transduction(Y*Yscale,slope = 0.1,asym = 3.0,sign=1)
vihc = lfilter(blop,alop,Yihc.T).T
return vihc
def ihc_synapse(Y,data,Yscale=0.7e2,fcut=3000,vihc_scale=1.5,channels=0):
fs = int( data['fs']/data['dec'] )
vihc = ihc(Y,fs,Yscale=Yscale,fcut=fcut)*vihc_scale
ff = np.flipud( np.logspace(np.log10(data['fmin']),np.log10(data['fmax']),data['nchan']))
args = { 'fs': fs, 'powerlaw': 'approximate','ffGn': False}
if channels==0:
channels = xrange(data['nchan'])
synout = synapse(vihc,ff,args,channels)
return synout,vihc
def ihcan_synapse(Y,data,Yscale=0.7e2,vihc_scale=1.5,anf_num = (100,50,30),channels=0):
import itertools
import pandas as pd
ff = np.flipud( np.logspace(np.log10(data['fmin']),np.log10(data['fmax']),data['nchan']))
vihc = ihc(Y,data['fs'],Yscale=Yscale)*vihc_scale
args = { 'fs': data['fs'], 'anf_num': anf_num, 'powerlaw': 'approximate','ffGn': False}
if channels==0:
channels = xrange(data['nchan'])
synout,nested = zilany2014_run_synapse(vihc,ff,args,channels)
trains = itertools.chain(*nested)
anf = pd.DataFrame(list(trains))
return synout,anf
def pure2periphery(pure_tone,data,channels=0):
X = pure2cochlea(pure_tone,data)
Y = X['Y']
syn,vihc = ihc_synapse(Y,data,channels=channels)
return {'Y':Y,'syn':syn}
def pure2periphery_(pure_tone,data,periphery_data={}):
X = pure2cochlea(pure_tone,data)
Y = X['Y']
anf_num = (50,30,20)
channels = periphery_data['channels']
anf = ihcan_synapse(Y,data,Yscale=1e2,vihc_scale=2,anf_num =anf_num,channels=channels)
return {'Y':Y,'anf':anf}
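# Minimal end-to-end sketch, mirroring pure2periphery() above; the `data` keys
# match those used in this module and the values are example placeholders:
#   data = {'fs': 100e3, 'dec': 1, 'fmin': 125.0, 'fmax': 8000.0, 'nchan': 50}
#   X = pure2cochlea(pure_tone, data)           # basilar-membrane response
#   synout, vihc = ihc_synapse(X['Y'], data)    # IHC potential and synapse output per channel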
|
pabloriera/pycochlea
|
pycochlea/periphery.py
|
Python
|
gpl-2.0
| 5,668
|
[
"Brian"
] |
0f7f43cd2c258e12985c48d7d49125216004118a9cf999a29f150888530920df
|
# #
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for eb command line options.
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
import stat
import sys
import tempfile
import textwrap
from distutils.version import LooseVersion
from unittest import TextTestRunner
import easybuild.main
import easybuild.tools.build_log
import easybuild.tools.options
import easybuild.tools.toolchain
from easybuild.base import fancylogger
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import BUILD, CUSTOM, DEPENDENCIES, EXTENSIONS, FILEMANAGEMENT, LICENSE
from easybuild.framework.easyconfig import MANDATORY, MODULES, OTHER, TOOLCHAIN
from easybuild.framework.easyconfig.easyconfig import EasyConfig, get_easyblock_class, robot_find_easyconfig
from easybuild.framework.easyconfig.parser import EasyConfigParser
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import DEFAULT_MODULECLASSES
from easybuild.tools.config import find_last_log, get_build_log_path, get_module_syntax, module_classes
from easybuild.tools.environment import modify_env
from easybuild.tools.filetools import adjust_permissions, change_dir, copy_dir, copy_file, download_file
from easybuild.tools.filetools import is_patch_file, mkdir, move_file, parse_http_header_fields_urlpat
from easybuild.tools.filetools import read_file, remove_dir, remove_file, which, write_file
from easybuild.tools.github import GITHUB_RAW, GITHUB_EB_MAIN, GITHUB_EASYCONFIGS_REPO
from easybuild.tools.github import URL_SEPARATOR, fetch_github_token
from easybuild.tools.modules import Lmod
from easybuild.tools.options import EasyBuildOptions, parse_external_modules_metadata, set_tmpdir, use_color
from easybuild.tools.py2vs3 import URLError, reload, sort_looseversions
from easybuild.tools.toolchain.utilities import TC_CONST_PREFIX
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import HAVE_ARCHSPEC
from easybuild.tools.version import VERSION
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
try:
import pycodestyle # noqa
except ImportError:
try:
import pep8 # noqa
except ImportError:
pass
EXTERNAL_MODULES_METADATA = """[foobar/1.2.3]
name = foo, bar
version = 1.2.3, 3.2.1
prefix = FOOBAR_DIR
[foobar/2.0]
name = foobar
version = 2.0
prefix = FOOBAR_PREFIX
[foo]
name = Foo
prefix = /foo
[bar/1.2.3]
name = bar
version = 1.2.3
"""
# test account, for which a token may be available
GITHUB_TEST_ACCOUNT = 'easybuild_test'
class CommandLineOptionsTest(EnhancedTestCase):
"""Testcases for command line options."""
logfile = None
def setUp(self):
"""Set up test."""
super(CommandLineOptionsTest, self).setUp()
self.github_token = fetch_github_token(GITHUB_TEST_ACCOUNT)
self.orig_terminal_supports_colors = easybuild.tools.options.terminal_supports_colors
self.orig_os_getuid = easybuild.main.os.getuid
self.orig_experimental = easybuild.tools.build_log.EXPERIMENTAL
def tearDown(self):
"""Clean up after test."""
easybuild.main.os.getuid = self.orig_os_getuid
easybuild.tools.options.terminal_supports_colors = self.orig_terminal_supports_colors
easybuild.tools.build_log.EXPERIMENTAL = self.orig_experimental
super(CommandLineOptionsTest, self).tearDown()
def purge_environment(self):
"""Remove any leftover easybuild variables"""
for var in os.environ.keys():
# retain $EASYBUILD_IGNORECONFIGFILES, to make sure the test is isolated from system-wide config files!
if var.startswith('EASYBUILD_') and var != 'EASYBUILD_IGNORECONFIGFILES':
del os.environ[var]
def test_help_short(self, txt=None):
"""Test short help message."""
if txt is None:
topt = EasyBuildOptions(
go_args=['-h'],
go_nosystemexit=True, # when printing help, optparse ends with sys.exit
go_columns=100, # fix col size for reproducible unittest output
                help_to_string=True,  # don't print to stdout, but to StringIO fh,
prog='easybuildoptions_test', # generate as if called from generaloption.py
)
outtxt = topt.parser.help_to_file.getvalue()
else:
outtxt = txt
self.assertTrue(re.search(' -h ', outtxt), "Only short options included in short help")
self.assertTrue(re.search("show short help message and exit", outtxt), "Documentation included in short help")
self.assertEqual(re.search("--short-help ", outtxt), None, "Long options not included in short help")
self.assertEqual(re.search("Software search and build options", outtxt), None,
"Not all option groups included in short help (1)")
self.assertEqual(re.search("Regression test options", outtxt), None,
"Not all option groups included in short help (2)")
def test_help_long(self):
"""Test long help message."""
topt = EasyBuildOptions(
go_args=['-H'],
go_nosystemexit=True, # when printing help, optparse ends with sys.exit
go_columns=200, # fix col size for reproducible unittest output
            help_to_string=True,  # don't print to stdout, but to StringIO fh,
prog='easybuildoptions_test', # generate as if called from generaloption.py
)
outtxt = topt.parser.help_to_file.getvalue()
self.assertTrue(re.search("-H OUTPUT_FORMAT, --help=OUTPUT_FORMAT", outtxt),
"Long documentation expanded in long help")
self.assertTrue(re.search("show short help message and exit", outtxt),
"Documentation included in long help")
self.assertTrue(re.search("Software search and build options", outtxt),
"Not all option groups included in short help (1)")
self.assertTrue(re.search("Regression test options", outtxt),
"Not all option groups included in short help (2)")
# for boolean options, we mention in the help text how to disable them
regex = re.compile("default: True; disable with --disable-cleanup-builddir", re.M)
self.assertTrue(regex.search(outtxt), "Pattern '%s' found in: %s" % (regex.pattern, outtxt))
def test_help_rst(self):
"""Test generating --help in RST output format."""
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(['--help=rst'], raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
patterns = [
r"^Basic options\n-------------",
r"^``--fetch``[ ]*Allow downloading sources",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
def test_no_args(self):
"""Test using no arguments."""
outtxt = self.eb_main([])
error_msg = "ERROR.* Please provide one or multiple easyconfig files,"
error_msg += " or use software build options to make EasyBuild search for easyconfigs"
regex = re.compile(error_msg)
self.assertTrue(regex.search(outtxt), "Pattern '%s' found in: %s" % (regex.pattern, outtxt))
def test_debug(self):
"""Test enabling debug logging."""
error_tmpl = "%s log messages are included when using %s: %s"
for debug_arg in ['-d', '--debug']:
args = [
'nosuchfile.eb',
debug_arg,
]
outtxt = self.eb_main(args)
for log_msg_type in ['DEBUG', 'INFO', 'ERROR']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(res, error_tmpl % (log_msg_type, debug_arg, outtxt))
def test_info(self):
"""Test enabling info logging."""
for info_arg in ['--info']:
args = [
'nosuchfile.eb',
info_arg,
]
outtxt = self.eb_main(args)
error_tmpl = "%s log messages are included when using %s ( out: %s)"
for log_msg_type in ['INFO', 'ERROR']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(res, error_tmpl % (log_msg_type, info_arg, outtxt))
for log_msg_type in ['DEBUG']:
res = re.search(' %s ' % log_msg_type, outtxt)
self.assertTrue(not res, "%s log messages are *not* included when using %s" % (log_msg_type, info_arg))
def test_quiet(self):
"""Test enabling quiet logging (errors only)."""
for quiet_arg in ['--quiet']:
args = ['nosuchfile.eb', quiet_arg]
out = self.eb_main(args)
for log_msg_type in ['ERROR']:
res = re.search(' %s ' % log_msg_type, out)
msg = "%s log messages are included when using %s (out: %s)" % (log_msg_type, quiet_arg, out)
self.assertTrue(res, msg)
for log_msg_type in ['DEBUG', 'INFO']:
res = re.search(' %s ' % log_msg_type, out)
msg = "%s log messages are *not* included when using %s (out: %s)" % (log_msg_type, quiet_arg, out)
self.assertTrue(not res, msg)
def test_force(self):
"""Test forcing installation even if the module is already available."""
# use GCC-4.6.3.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 'g', 'GCC', 'GCC-4.6.3.eb')
# check log message without --force
args = [
eb_file,
'--debug',
]
outtxt, error_thrown = self.eb_main(args, return_error=True)
error_msg = "No error is thrown if software is already installed (error_thrown: %s)" % error_thrown
self.assertTrue(not error_thrown, error_msg)
already_msg = "GCC/4.6.3 is already installed"
error_msg = "Already installed message without --force, outtxt: %s" % outtxt
self.assertTrue(re.search(already_msg, outtxt), error_msg)
# clear log file
write_file(self.logfile, '')
# check that --force and --rebuild work
for arg in ['--force', '--rebuild']:
outtxt = self.eb_main([eb_file, '--debug', arg])
self.assertTrue(not re.search(already_msg, outtxt), "Already installed message not there with %s" % arg)
def test_skip(self):
"""Test skipping installation of module (--skip, -k)."""
# use toy-0.0.eb easyconfig file that comes with the tests
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# check log message with --skip for existing module
args = [
toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
]
self.eb_main(args, do_build=True)
args.append('--skip')
self.mock_stdout(True)
outtxt = self.eb_main(args, do_build=True, verbose=True)
self.mock_stdout(False)
found_msg = "Module toy/0.0 found.\n[^\n]+Going to skip actual main build"
found = re.search(found_msg, outtxt, re.M)
self.assertTrue(found, "Module found message present with --skip, outtxt: %s" % outtxt)
# cleanup for next test
write_file(self.logfile, '')
os.chdir(self.cwd)
# check log message with --skip for non-existing module
args = [
toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--try-software-version=1.2.3.4.5.6.7.8.9',
'--try-amend=sources=toy-0.0.tar.gz,toy-0.0.tar.gz', # hackish, but fine
'--force',
'--debug',
'--skip',
]
outtxt = self.eb_main(args, do_build=True, verbose=True)
found_msg = "Module toy/1.2.3.4.5.6.7.8.9 found."
found = re.search(found_msg, outtxt)
self.assertTrue(not found, "Module found message not there with --skip for non-existing modules: %s" % outtxt)
not_found_msg = "No module toy/1.2.3.4.5.6.7.8.9 found. Not skipping anything."
not_found = re.search(not_found_msg, outtxt)
self.assertTrue(not_found, "Module not found message there with --skip for non-existing modules: %s" % outtxt)
toy_mod_glob = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '*')
for toy_mod in glob.glob(toy_mod_glob):
remove_file(toy_mod)
self.assertFalse(glob.glob(toy_mod_glob))
# make sure that sanity check is *NOT* skipped under --skip
test_ec = os.path.join(self.test_prefix, 'test.eb')
test_ec_txt = read_file(toy_ec)
regex = re.compile(r"sanity_check_paths = \{(.|\n)*\}", re.M)
test_ec_txt = regex.sub("sanity_check_paths = {'files': ['bin/nosuchfile'], 'dirs': []}", test_ec_txt)
write_file(test_ec, test_ec_txt)
args = [
test_ec,
'--skip',
'--force',
]
error_pattern = "Sanity check failed: no file found at 'bin/nosuchfile'"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
# check use of skipsteps to skip sanity check
test_ec_txt += "\nskipsteps = ['sanitycheck']\n"
write_file(test_ec, test_ec_txt)
self.eb_main(args, do_build=True, raise_error=True)
self.assertEqual(len(glob.glob(toy_mod_glob)), 1)
def test_skip_test_step(self):
"""Test skipping testing the build (--skip-test-step)."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-test.eb')
# check log message without --skip-test-step
args = [
toy_ec,
'--extended-dry-run',
'--force',
'--debug',
]
self.mock_stdout(True)
outtxt = self.eb_main(args, do_build=True)
self.mock_stdout(False)
found_msg = "Running method test_step part of step test"
found = re.search(found_msg, outtxt)
test_run_msg = "execute make_test dummy_cmd as a command for running unit tests"
self.assertTrue(found, "Message about test step being run is present, outtxt: %s" % outtxt)
found = re.search(test_run_msg, outtxt)
self.assertTrue(found, "Test execution command is present, outtxt: %s" % outtxt)
# And now with the argument
args.append('--skip-test-step')
self.mock_stdout(True)
outtxt = self.eb_main(args, do_build=True)
self.mock_stdout(False)
found_msg = "Skipping test step"
found = re.search(found_msg, outtxt)
self.assertTrue(found, "Message about test step being skipped is present, outtxt: %s" % outtxt)
found = re.search(test_run_msg, outtxt)
self.assertFalse(found, "Test execution command is NOT present, outtxt: %s" % outtxt)
def test_ignore_test_failure(self):
"""Test ignore failing tests (--ignore-test-failure)."""
topdir = os.path.abspath(os.path.dirname(__file__))
# This EC uses a `runtest` command which does not exist and hence will make the test step fail
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-test.eb')
args = [toy_ec, '--ignore-test-failure', '--force']
with self.mocked_stdout_stderr() as (_, stderr):
outtxt = self.eb_main(args, do_build=True)
msg = 'Test failure ignored'
self.assertTrue(re.search(msg, outtxt),
"Ignored test failure message in log should be found, outtxt: %s" % outtxt)
self.assertTrue(re.search(msg, stderr.getvalue()),
"Ignored test failure message in stderr should be found, stderr: %s" % stderr.getvalue())
# Passing skip and ignore options is disallowed
args.append('--skip-test-step')
error_pattern = 'Found both ignore-test-failure and skip-test-step enabled'
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
def test_job(self):
"""Test submitting build as a job."""
# use gzip-1.4.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 'g', 'gzip', 'gzip-1.4.eb')
def check_args(job_args, passed_args=None):
"""Check whether specified args yield expected result."""
if passed_args is None:
passed_args = job_args[:]
# clear log file
write_file(self.logfile, '')
args = [
eb_file,
'--job',
] + job_args
outtxt = self.eb_main(args)
job_msg = r"INFO.* Command template for jobs: .* && eb %%\(spec\)s.* %s.*\n" % ' .*'.join(passed_args)
assertmsg = "Info log msg with job command template for --job (job_msg: %s, outtxt: %s)" % (job_msg, outtxt)
self.assertTrue(re.search(job_msg, outtxt), assertmsg)
# options passed are reordered, so order here matters to make tests pass
check_args(['--debug'])
check_args(['--debug', '--stop=configure', '--try-software-name=foo'],
passed_args=['--debug', "--stop='configure'"])
check_args(['--debug', '--robot-paths=/tmp/foo:/tmp/bar'],
passed_args=['--debug', "--robot-paths='/tmp/foo:/tmp/bar'"])
# --robot has preference over --robot-paths, --robot is not passed down
check_args(['--debug', '--robot-paths=/tmp/foo', '--robot=%s' % self.test_prefix],
passed_args=['--debug', "--robot-paths='%s:/tmp/foo'" % self.test_prefix])
# 'zzz' prefix in the test name is intentional to make this test run last,
# since it fiddles with the logging infrastructure which may break things
def test_zzz_logtostdout(self):
"""Testing redirecting log to stdout."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
for stdout_arg in ['--logtostdout', '-l']:
args = [
'--software-name=somethingrandom',
'--robot', '.',
'--debug',
stdout_arg,
]
self.mock_stdout(True)
self.eb_main(args, logfile=dummylogfn)
stdout = self.get_stdout()
self.mock_stdout(False)
# make sure we restore
fancylogger.logToScreen(enable=False, stdout=True)
error_msg = "Log messages are printed to stdout when %s is used (stdout: %s)" % (stdout_arg, stdout)
self.assertTrue(len(stdout) > 100, error_msg)
topdir = os.path.dirname(os.path.abspath(__file__))
toy_ecfile = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
self.logfile = None
self.mock_stdout(True)
self.eb_main([toy_ecfile, '--debug', '-l', '--force'], do_build=True, raise_error=True)
stdout = self.get_stdout()
self.mock_stdout(False)
self.assertTrue("Auto-enabling streaming output" in stdout)
self.assertTrue("== (streaming) output for command 'gcc toy.c -o toy':" in stdout)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_avail_easyconfig_constants(self):
"""Test listing available easyconfig file constants."""
def run_test(fmt=None):
"""Helper function to test --avail-easyconfig-constants."""
args = ['--avail-easyconfig-constants']
if fmt is not None:
args.append('--output-format=%s' % fmt)
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, verbose=True, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
if fmt == 'rst':
pattern_lines = [
r'^``ARCH``\s*``(aarch64|ppc64le|x86_64)``\s*CPU architecture .*',
r'^``EXTERNAL_MODULE``.*',
r'^``HOME``.*',
r'``OS_NAME``.*',
r'``OS_PKG_IBVERBS_DEV``.*',
]
else:
pattern_lines = [
r'^\s*ARCH: (aarch64|ppc64le|x86_64) \(CPU architecture .*\)',
r'^\s*EXTERNAL_MODULE:.*',
r'^\s*HOME:.*',
r'\s*OS_NAME: .*',
r'\s*OS_PKG_IBVERBS_DEV: .*',
]
regex = re.compile('\n'.join(pattern_lines), re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' should match in: %s" % (regex.pattern, stdout))
for fmt in [None, 'txt', 'rst']:
run_test(fmt=fmt)
def test_avail_easyconfig_templates(self):
"""Test listing available easyconfig file templates."""
def run_test(fmt=None):
"""Helper function to test --avail-easyconfig-templates."""
args = ['--avail-easyconfig-templates']
if fmt is not None:
args.append('--output-format=%s' % fmt)
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, verbose=True, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
if fmt == 'rst':
pattern_lines = [
r'^``%\(version_major\)s``\s+Major version\s*$',
r'^``%\(cudaver\)s``\s+full version for CUDA\s*$',
r'^``%\(pyshortver\)s``\s+short version for Python \(<major>.<minor>\)\s*$',
r'^\* ``%\(name\)s``$',
r'^``%\(namelower\)s``\s+lower case of value of name\s*$',
r'^``%\(arch\)s``\s+System architecture \(e.g. x86_64, aarch64, ppc64le, ...\)\s*$',
r'^``%\(cuda_cc_space_sep\)s``\s+Space-separated list of CUDA compute capabilities\s*$',
r'^``SOURCE_TAR_GZ``\s+Source \.tar\.gz bundle\s+``%\(name\)s-%\(version\)s.tar.gz``\s*$',
]
else:
pattern_lines = [
r'^\s+%\(version_major\)s: Major version$',
r'^\s+%\(cudaver\)s: full version for CUDA$',
r'^\s+%\(pyshortver\)s: short version for Python \(<major>.<minor>\)$',
r'^\s+%\(name\)s$',
r'^\s+%\(namelower\)s: lower case of value of name$',
r'^\s+%\(arch\)s: System architecture \(e.g. x86_64, aarch64, ppc64le, ...\)$',
r'^\s+%\(cuda_cc_space_sep\)s: Space-separated list of CUDA compute capabilities$',
r'^\s+SOURCE_TAR_GZ: Source \.tar\.gz bundle \(%\(name\)s-%\(version\)s.tar.gz\)$',
]
for pattern_line in pattern_lines:
regex = re.compile(pattern_line, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' should match in: %s" % (regex.pattern, stdout))
for fmt in [None, 'txt', 'rst']:
run_test(fmt=fmt)
def test_avail_easyconfig_params(self):
"""Test listing available easyconfig parameters."""
def run_test(custom=None, extra_params=[], fmt=None):
"""Inner function to run actual test in current setting."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
avail_args = [
'-a',
'--avail-easyconfig-params',
]
for avail_arg in avail_args:
# clear log
write_file(self.logfile, '')
args = [
'--unittest-file=%s' % self.logfile,
avail_arg,
]
if fmt is not None:
args.append('--output-format=%s' % fmt)
if custom is not None:
args.extend(['-e', custom])
self.eb_main(args, logfile=dummylogfn, verbose=True, raise_error=True)
logtxt = read_file(self.logfile)
# check whether all parameter types are listed
par_types = [BUILD, DEPENDENCIES, EXTENSIONS, FILEMANAGEMENT,
LICENSE, MANDATORY, MODULES, OTHER, TOOLCHAIN]
if custom is not None:
par_types.append(CUSTOM)
for param_type in [x[1] for x in par_types]:
# regex for parameter group title, matches both txt and rst formats
regex = re.compile("%s.*\n%s" % (param_type, '-' * len(param_type)), re.I)
tup = (param_type, avail_arg, args, logtxt)
msg = "Parameter type %s is featured in output of eb %s (args: %s): %s" % tup
self.assertTrue(regex.search(logtxt), msg)
ordered_params = ['name', 'toolchain', 'version', 'versionsuffix']
params = ordered_params + ['buildopts', 'sources', 'start_dir', 'dependencies', 'group',
'exts_list', 'moduleclass', 'buildstats'] + extra_params
# check a couple of easyconfig parameters
param_start = 0
for param in params:
# regex for parameter name (with optional '*') & description, matches both txt and rst formats
regex = re.compile(r"^[`]*%s(?:\*)?[`]*\s+\w+" % param, re.M)
tup = (param, avail_arg, args, regex.pattern, logtxt)
msg = "Parameter %s is listed with help in output of eb %s (args: %s, regex: %s): %s" % tup
res = regex.search(logtxt)
self.assertTrue(res, msg)
if param in ordered_params:
# check whether this parameter is listed after previous one
self.assertTrue(param_start < res.start(0), "%s is in expected order in: %s" % (param, logtxt))
param_start = res.start(0)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
for fmt in [None, 'txt', 'rst']:
run_test(fmt=fmt)
run_test(custom='EB_foo', extra_params=['foo_extra1', 'foo_extra2'], fmt=fmt)
run_test(custom='bar', extra_params=['bar_extra1', 'bar_extra2'], fmt=fmt)
run_test(custom='EB_foofoo', extra_params=['foofoo_extra1', 'foofoo_extra2'], fmt=fmt)
# double underscore to make sure it runs first, which is required to detect certain types of bugs,
    # e.g. running with non-initialized EasyBuild config (truly mimicking 'eb --list-toolchains')
def test__list_toolchains(self):
"""Test listing known compiler toolchains."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'--list-toolchains',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True)
info_msg = r"INFO List of known toolchains \(toolchainname: module\[,module\.\.\.\]\):"
logtxt = read_file(self.logfile)
self.assertTrue(re.search(info_msg, logtxt), "Info message with list of known toolchains found in: %s" % logtxt)
# toolchain elements should be in alphabetical order
tcs = {
'system': [],
'goalf': ['ATLAS', 'BLACS', 'FFTW', 'GCC', 'OpenMPI', 'ScaLAPACK'],
'intel': ['icc', 'ifort', 'imkl', 'impi'],
}
for tc, tcelems in tcs.items():
res = re.findall(r"^\s*%s: .*" % tc, logtxt, re.M)
self.assertTrue(res, "Toolchain %s is included in list of known compiler toolchains" % tc)
# every toolchain should only be mentioned once
n = len(res)
self.assertEqual(n, 1, "Toolchain %s is only mentioned once (count: %d)" % (tc, n))
# make sure definition is correct (each element only named once, in alphabetical order)
self.assertEqual("\t%s: %s" % (tc, ', '.join(tcelems)), res[0])
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_list_toolchains_rst(self):
"""Test --list-toolchains --output-format=rst."""
args = [
'--list-toolchains',
'--output-format=rst',
]
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout().strip()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
title = "List of known toolchains"
# separator line: starts/ends with sequence of '=', 4 spaces in between columns
sep_line = r'=(=+\s{4})+[=]+='
col_names = ['Name', r'Compiler\(s\)', 'MPI', 'Linear algebra', 'FFT']
col_names_line = r'\s+'.join(col_names) + r'\s*'
patterns = [
# title
'^' + title + '\n' + '-' * len(title) + '\n',
# header
'\n' + '\n'.join([sep_line, col_names_line, sep_line]) + '\n',
# compiler-only GCC toolchain
r"\n\*\*GCC\*\*\s+GCC\s+\*\(none\)\*\s+\*\(none\)\*\s+\*\(none\)\*\s*\n",
# gompi compiler + MPI toolchain
r"\n\*\*gompi\*\*\s+GCC\s+OpenMPI\s+\*\(none\)\*\s+\*\(none\)\*\s*\n",
# full 'foss' toolchain
r"\*\*foss\*\*\s+GCC\s+OpenMPI\s+OpenBLAS,\s+ScaLAPACK\s+FFTW\s*\n",
# compiler-only iccifort toolchain
r"\*\*iccifort\*\*\s+icc,\s+ifort\s+\*\(none\)\*\s+\*\(none\)\*\s+\*\(none\)\*\s*\n",
# full 'intel' toolchain (imkl appears twice, in linalg + FFT columns)
r"\*\*intel\*\*\s+icc,\s+ifort\s+impi\s+imkl\s+imkl\s*\n",
# fosscuda toolchain, also lists CUDA in compilers column
r"\*\*fosscuda\*\*\s+GCC,\s+CUDA\s+OpenMPI\s+OpenBLAS,\s+ScaLAPACK\s+FFTW\s*\n",
# system toolchain: 'none' in every column
r"\*\*system\*\*\s+\*\(none\)\*\s+\*\(none\)\*\s+\*\(none\)\*\s+\*\(none\)\*\s*\n",
# Cray special case
r"\n\*\*CrayGNU\*\*\s+PrgEnv-gnu\s+cray-mpich\s+cray-libsci\s+\*\(none\)\*\s*\n",
# footer
'\n' + sep_line + '$',
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
def test_avail_lists(self):
"""Test listing available values of certain types."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
name_items = {
'modules-tools': ['EnvironmentModulesC', 'Lmod'],
'module-naming-schemes': ['EasyBuildMNS', 'HierarchicalMNS', 'CategorizedHMNS'],
}
for (name, items) in name_items.items():
args = [
'--avail-%s' % name,
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
words = name.replace('-', ' ')
info_msg = r"INFO List of supported %s:" % words
self.assertTrue(re.search(info_msg, logtxt), "Info message with list of available %s" % words)
for item in items:
res = re.findall(r"^\s*%s" % item, logtxt, re.M)
self.assertTrue(res, "%s is included in list of available %s" % (item, words))
# every item should only be mentioned once
n = len(res)
self.assertEqual(n, 1, "%s is only mentioned once (count: %d)" % (item, n))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_avail_cfgfile_constants(self):
"""Test --avail-cfgfile-constants."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
copy_dir(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
args = [
'--avail-cfgfile-constants',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
cfgfile_constants = {
'DEFAULT_ROBOT_PATHS': os.path.join(tmpdir, 'easybuild', 'easyconfigs'),
}
for cst_name, cst_value in cfgfile_constants.items():
cst_regex = re.compile(r"^\*\s%s:\s.*\s\[value: .*%s.*\]" % (cst_name, cst_value), re.M)
tup = (cst_regex.pattern, logtxt)
self.assertTrue(cst_regex.search(logtxt), "Pattern '%s' in --avail-cfgfile_constants output: %s" % tup)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
sys.path[:] = orig_sys_path
# use test_000_* to ensure this test is run *first*,
# before any tests that pick up additional easyblocks (which are difficult to clean up)
def test_000_list_easyblocks(self):
"""Test listing easyblock hierarchy."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# simple view
for list_arg in ['--list-easyblocks', '--list-easyblocks=simple']:
# clear log
write_file(self.logfile, '')
args = [
list_arg,
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
expected = '\n'.join([
r'EasyBlock',
r'\|-- bar',
r'\|-- ConfigureMake',
r'\| \|-- MakeCp',
r'\|-- EB_EasyBuildMeta',
r'\|-- EB_FFTW',
r'\|-- EB_foo',
r'\| \|-- EB_foofoo',
r'\|-- EB_GCC',
r'\|-- EB_HPL',
r'\|-- EB_libtoy',
r'\|-- EB_OpenBLAS',
r'\|-- EB_OpenMPI',
r'\|-- EB_ScaLAPACK',
r'\|-- EB_toy_buggy',
r'\|-- ExtensionEasyBlock',
r'\| \|-- DummyExtension',
r'\| \|-- EB_toy',
r'\| \| \|-- EB_toy_eula',
r'\| \| \|-- EB_toytoy',
r'\| \|-- Toy_Extension',
r'\|-- ModuleRC',
r'\|-- PythonBundle',
r'\|-- Toolchain',
r'Extension',
r'\|-- ExtensionEasyBlock',
r'\| \|-- DummyExtension',
r'\| \|-- EB_toy',
r'\| \| \|-- EB_toy_eula',
r'\| \| \|-- EB_toytoy',
r'\| \|-- Toy_Extension',
])
regex = re.compile(expected, re.M)
self.assertTrue(regex.search(logtxt), "Pattern '%s' found in: %s" % (regex.pattern, logtxt))
# clear log
write_file(self.logfile, '')
# detailed view
args = [
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
patterns = [
r"EasyBlock\s+\(easybuild.framework.easyblock\)\n",
r"\|--\s+EB_foo\s+\(easybuild.easyblocks.foo @ .*/sandbox/easybuild/easyblocks/f/foo.py\)\n" +
r"\|\s+\|--\s+EB_foofoo\s+\(easybuild.easyblocks.foofoo @ .*/sandbox/easybuild/easyblocks/f/foofoo.py\)\n",
r"\|--\s+bar\s+\(easybuild.easyblocks.generic.bar @ .*/sandbox/easybuild/easyblocks/generic/bar.py\)\n",
]
for pat in patterns:
msg = "Pattern '%s' is found in output of --list-easyblocks: %s" % (pat, logtxt)
self.assertTrue(re.search(pat, logtxt), msg)
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_search(self):
"""Test searching for easyconfigs."""
test_easyconfigs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
# simple search
args = [
'--search=gzip',
'--robot=%s' % test_easyconfigs_dir,
]
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
for ec in ["gzip-1.4.eb", "gzip-1.4-GCC-4.6.3.eb"]:
regex = re.compile(r" \* \S*%s$" % ec, re.M)
self.assertTrue(regex.search(txt), "Found pattern '%s' in: %s" % (regex.pattern, txt))
# search w/ regex
args = [
'--search=^gcc.*2.eb',
'--robot=%s' % test_easyconfigs_dir,
]
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
for ec in ['GCC-4.8.2.eb', 'GCC-4.9.2.eb']:
regex = re.compile(r" \* \S*%s$" % ec, re.M)
self.assertTrue(regex.search(txt), "Found pattern '%s' in: %s" % (regex.pattern, txt))
gcc_ecs = [
'GCC-4.6.3.eb',
'GCC-4.6.4.eb',
'GCC-4.8.2.eb',
'GCC-4.8.3.eb',
'GCC-4.9.2.eb',
'GCC-6.4.0-2.28.eb',
]
# test --search-filename
args = [
'--search-filename=^gcc',
'--robot=%s' % test_easyconfigs_dir,
]
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
for ec in gcc_ecs:
regex = re.compile(r"^ \* %s$" % ec, re.M)
self.assertTrue(regex.search(txt), "Found pattern '%s' in: %s" % (regex.pattern, txt))
# test --search-filename --terse
args = [
'--search-filename=^gcc',
'--terse',
'--robot=%s' % test_easyconfigs_dir,
]
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
for ec in gcc_ecs:
regex = re.compile(r"^%s$" % ec, re.M)
self.assertTrue(regex.search(txt), "Found pattern '%s' in: %s" % (regex.pattern, txt))
# also test --search-short/-S
for search_arg in ['-S', '--search-short']:
args = [
search_arg,
'^toy-0.0',
'-r',
test_easyconfigs_dir,
]
self.mock_stdout(True)
self.eb_main(args, raise_error=True, verbose=True, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(re.search(r'^CFGS\d+=', txt, re.M), "CFGS line message found in '%s'" % txt)
for ec in ["toy-0.0.eb", "toy-0.0-multiple.eb"]:
regex = re.compile(r" \* \$CFGS\d+/*%s" % ec, re.M)
self.assertTrue(regex.search(txt), "Found pattern '%s' in: %s" % (regex.pattern, txt))
# combining --search with --try-* should not cause trouble; --try-* should just be ignored
args = [
'--search=^gcc',
'--robot-paths=%s' % test_easyconfigs_dir,
'--try-toolchain-version=1.2.3',
]
self.mock_stdout(True)
self.eb_main(args, testing=False, raise_error=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(re.search('GCC-4.9.2', txt))
# test using a search pattern that includes special characters like '+', '(', or ')' (should not crash)
# cfr. https://github.com/easybuilders/easybuild-framework/issues/2966
# characters like ^, . or * are not touched, since these can be used as regex characters in queries
for opt in ['--search', '-S', '--search-short']:
for pattern in ['netCDF-C++', 'foo|bar', '^foo', 'foo.*bar']:
args = [opt, pattern, '--robot', test_easyconfigs_dir]
self.mock_stdout(True)
self.eb_main(args, raise_error=True, verbose=True, testing=False)
stdout = self.get_stdout()
self.mock_stdout(False)
# there shouldn't be any hits for any of these queries, so empty output...
self.assertEqual(stdout.strip(), '')
# some search patterns are simply invalid,
# if they include allowed special characters like '*' but are used incorrectly...
# a proper error is produced in that case (as opposed to a crash)
for opt in ['--search', '-S', '--search-short']:
for pattern in ['*foo', '(foo', ')foo', 'foo)', 'foo(']:
args = [opt, pattern, '--robot', test_easyconfigs_dir]
self.assertErrorRegex(EasyBuildError, "Invalid search query", self.eb_main, args, raise_error=True)
def test_ignore_index(self):
"""
Test use of --ignore-index.
"""
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
toy_ec = os.path.join(test_ecs_dir, 'test_ecs', 't', 'toy', 'toy-0.0.eb')
copy_file(toy_ec, self.test_prefix)
toy_ec_list = ['toy-0.0.eb', 'toy-1.2.3.eb', 'toy-4.5.6.eb', 'toy-11.5.6.eb']
# install index that list more files than are actually available,
# so we can check whether it's used
index_txt = '\n'.join(toy_ec_list)
write_file(os.path.join(self.test_prefix, '.eb-path-index'), index_txt)
args = [
'--search=toy',
'--robot-paths=%s' % self.test_prefix,
'--terse',
]
self.mock_stdout(True)
self.eb_main(args, testing=False, raise_error=True)
stdout = self.get_stdout()
self.mock_stdout(False)
# Also checks for ordering: 11.x comes last!
expected_output = '\n'.join(os.path.join(self.test_prefix, ec) for ec in toy_ec_list) + '\n'
self.assertEqual(stdout, expected_output)
args.append('--ignore-index')
self.mock_stdout(True)
self.eb_main(args, testing=False, raise_error=True)
stdout = self.get_stdout()
self.mock_stdout(False)
# This should be the only EC found
self.assertEqual(stdout, os.path.join(self.test_prefix, 'toy-0.0.eb') + '\n')
def test_search_archived(self):
"Test searching for archived easyconfigs"
args = ['--search-filename=^intel']
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout().rstrip()
self.mock_stdout(False)
expected = '\n'.join([
' * intel-compilers-2021.2.0.eb',
' * intel-2018a.eb',
'',
"Note: 1 matching archived easyconfig(s) found, use --consider-archived-easyconfigs to see them",
])
self.assertEqual(txt, expected)
args.append('--consider-archived-easyconfigs')
self.mock_stdout(True)
self.eb_main(args, testing=False)
txt = self.get_stdout().rstrip()
self.mock_stdout(False)
expected = '\n'.join([
' * intel-compilers-2021.2.0.eb',
' * intel-2018a.eb',
'',
"Matching archived easyconfigs:",
'',
' * intel-2012a.eb',
])
self.assertEqual(txt, expected)
def test_show_ec(self):
"""Test 'eb --show-ec'."""
args = [
'--show-ec',
'toy-0.0.eb',
'gzip-1.6-GCC-4.9.2.eb',
]
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
patterns = [
r"^== Contents of .*/test/framework/easyconfigs/test_ecs/t/toy/toy-0.0.eb:",
r"^name = 'toy'",
r"^toolchain = SYSTEM",
r"^sanity_check_paths = {\n 'files': \[\('bin/yot', 'bin/toy'\)\],",
r"^== Contents of .*/test/framework/easyconfigs/test_ecs/g/gzip/gzip-1.6-GCC-4.9.2.eb:",
r"^easyblock = 'ConfigureMake'\n\nname = 'gzip'",
r"^toolchain = {'name': 'GCC', 'version': '4.9.2'}",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
def mocked_main(self, args, **kwargs):
"""Run eb_main with mocked stdout/stderr."""
if not kwargs:
kwargs = {'raise_error': True}
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, **kwargs)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertEqual(stderr, '')
return stdout.strip()
def test_copy_ec(self):
"""Test --copy-ec."""
topdir = os.path.dirname(os.path.abspath(__file__))
test_easyconfigs_dir = os.path.join(topdir, 'easyconfigs', 'test_ecs')
toy_ec_txt = read_file(os.path.join(test_easyconfigs_dir, 't', 'toy', 'toy-0.0.eb'))
bzip2_ec_txt = read_file(os.path.join(test_easyconfigs_dir, 'b', 'bzip2', 'bzip2-1.0.6-GCC-4.9.2.eb'))
# basic test: copying one easyconfig file to a non-existing absolute path
test_ec = os.path.join(self.test_prefix, 'test.eb')
args = ['--copy-ec', 'toy-0.0.eb', test_ec]
stdout = self.mocked_main(args)
regex = re.compile(r'.*/toy-0.0.eb copied to %s' % test_ec)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
self.assertTrue(os.path.exists(test_ec))
self.assertEqual(toy_ec_txt, read_file(test_ec))
remove_file(test_ec)
# basic test: copying one easyconfig file to a non-existing relative path
cwd = change_dir(self.test_prefix)
target_fn = 'test.eb'
self.assertFalse(os.path.exists(target_fn))
args = ['--copy-ec', 'toy-0.0.eb', target_fn]
stdout = self.mocked_main(args)
regex = re.compile(r'.*/toy-0.0.eb copied to test.eb')
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
change_dir(cwd)
self.assertTrue(os.path.exists(test_ec))
self.assertEqual(toy_ec_txt, read_file(test_ec))
# copying one easyconfig into an existing directory
test_target_dir = os.path.join(self.test_prefix, 'test_target_dir')
mkdir(test_target_dir)
args = ['--copy-ec', 'toy-0.0.eb', test_target_dir]
stdout = self.mocked_main(args)
regex = re.compile(r'.*/toy-0.0.eb copied to %s' % test_target_dir)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
copied_toy_ec = os.path.join(test_target_dir, 'toy-0.0.eb')
self.assertTrue(os.path.exists(copied_toy_ec))
self.assertEqual(toy_ec_txt, read_file(copied_toy_ec))
remove_dir(test_target_dir)
def check_copied_files():
"""Helper function to check result of copying multiple easyconfigs."""
self.assertTrue(os.path.exists(test_target_dir))
self.assertEqual(sorted(os.listdir(test_target_dir)), ['bzip2-1.0.6-GCC-4.9.2.eb', 'toy-0.0.eb'])
copied_toy_ec = os.path.join(test_target_dir, 'toy-0.0.eb')
self.assertTrue(os.path.exists(copied_toy_ec))
self.assertEqual(toy_ec_txt, read_file(copied_toy_ec))
copied_bzip2_ec = os.path.join(test_target_dir, 'bzip2-1.0.6-GCC-4.9.2.eb')
self.assertTrue(os.path.exists(copied_bzip2_ec))
self.assertEqual(bzip2_ec_txt, read_file(copied_bzip2_ec))
# copying multiple easyconfig files to a non-existing target directory (which is created automatically)
args = ['--copy-ec', 'toy-0.0.eb', 'bzip2-1.0.6-GCC-4.9.2.eb', test_target_dir]
stdout = self.mocked_main(args)
self.assertEqual(stdout, '2 file(s) copied to %s' % test_target_dir)
check_copied_files()
remove_dir(test_target_dir)
# same but with relative path for target dir
change_dir(self.test_prefix)
args[-1] = os.path.basename(test_target_dir)
self.assertFalse(os.path.exists(args[-1]))
stdout = self.mocked_main(args)
self.assertEqual(stdout, '2 file(s) copied to test_target_dir')
check_copied_files()
# copying multiple easyconfig to an existing target file results in an error
target = os.path.join(self.test_prefix, 'test.eb')
self.assertTrue(os.path.isfile(target))
args = ['--copy-ec', 'toy-0.0.eb', 'bzip2-1.0.6-GCC-4.9.2.eb', target]
error_pattern = ".*/test.eb exists but is not a directory"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
# test use of --copy-ec with only one argument: copy to current working directory
test_working_dir = os.path.join(self.test_prefix, 'test_working_dir')
mkdir(test_working_dir)
change_dir(test_working_dir)
self.assertEqual(len(os.listdir(os.getcwd())), 0)
args = ['--copy-ec', 'toy-0.0.eb']
stdout = self.mocked_main(args)
regex = re.compile('.*/toy-0.0.eb copied to .*/%s' % os.path.basename(test_working_dir))
self.assertTrue(regex.match(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
copied_toy_cwd = os.path.join(test_working_dir, 'toy-0.0.eb')
self.assertTrue(os.path.exists(copied_toy_cwd))
self.assertEqual(read_file(copied_toy_cwd), toy_ec_txt)
# --copy-ec without arguments results in a proper error
args = ['--copy-ec']
error_pattern = "One or more files to copy should be specified!"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
def test_github_copy_ec_from_pr(self):
"""Test combination of --copy-ec with --from-pr."""
if self.github_token is None:
print("Skipping test_copy_ec_from_pr, no GitHub token available?")
return
test_working_dir = os.path.join(self.test_prefix, 'test_working_dir')
mkdir(test_working_dir)
test_target_dir = os.path.join(self.test_prefix, 'test_target_dir')
# Make sure the test target directory doesn't exist
remove_dir(test_target_dir)
all_files_pr8007 = [
'Arrow-0.7.1-intel-2017b-Python-3.6.3.eb',
'bat-0.3.3-fix-pyspark.patch',
'bat-0.3.3-intel-2017b-Python-3.6.3.eb',
]
# test use of --copy-ec with --from-pr to the current working directory
cwd = change_dir(test_working_dir)
args = ['--copy-ec', '--from-pr', '8007']
stdout = self.mocked_main(args)
regex = re.compile(r"3 file\(s\) copied to .*/%s" % os.path.basename(test_working_dir))
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
# check that the files exist
for pr_file in all_files_pr8007:
self.assertTrue(os.path.exists(os.path.join(test_working_dir, pr_file)))
remove_file(os.path.join(test_working_dir, pr_file))
# copying all files touched by PR to a non-existing target directory (which is created automatically)
self.assertFalse(os.path.exists(test_target_dir))
args = ['--copy-ec', '--from-pr', '8007', test_target_dir]
stdout = self.mocked_main(args)
regex = re.compile(r"3 file\(s\) copied to .*/%s" % os.path.basename(test_target_dir))
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
for pr_file in all_files_pr8007:
self.assertTrue(os.path.exists(os.path.join(test_target_dir, pr_file)))
remove_dir(test_target_dir)
# test where we select a single easyconfig file from a PR
mkdir(test_target_dir)
ec_filename = 'bat-0.3.3-intel-2017b-Python-3.6.3.eb'
args = ['--copy-ec', '--from-pr', '8007', ec_filename, test_target_dir]
stdout = self.mocked_main(args)
regex = re.compile(r"%s copied to .*/%s" % (ec_filename, os.path.basename(test_target_dir)))
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
self.assertEqual(os.listdir(test_target_dir), [ec_filename])
self.assertTrue("name = 'bat'" in read_file(os.path.join(test_target_dir, ec_filename)))
remove_dir(test_target_dir)
# test copying of a single easyconfig file from a PR to a non-existing path
bat_ec = os.path.join(self.test_prefix, 'bat.eb')
args[-1] = bat_ec
stdout = self.mocked_main(args)
regex = re.compile(r"%s copied to .*/bat.eb" % ec_filename)
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
self.assertTrue(os.path.exists(bat_ec))
self.assertTrue("name = 'bat'" in read_file(bat_ec))
change_dir(cwd)
remove_dir(test_working_dir)
mkdir(test_working_dir)
change_dir(test_working_dir)
# test copying of a patch file from a PR via --copy-ec to current directory
patch_fn = 'bat-0.3.3-fix-pyspark.patch'
args = ['--copy-ec', '--from-pr', '8007', patch_fn, '.']
stdout = self.mocked_main(args)
self.assertEqual(os.listdir(test_working_dir), [patch_fn])
patch_path = os.path.join(test_working_dir, patch_fn)
self.assertTrue(os.path.exists(patch_path))
self.assertTrue(is_patch_file(patch_path))
remove_file(patch_path)
# test the same thing but where we don't provide a target location
change_dir(test_working_dir)
args = ['--copy-ec', '--from-pr', '8007', ec_filename]
stdout = self.mocked_main(args)
regex = re.compile(r"%s copied to .*/%s" % (ec_filename, os.path.basename(test_working_dir)))
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
self.assertEqual(os.listdir(test_working_dir), [ec_filename])
self.assertTrue("name = 'bat'" in read_file(os.path.join(test_working_dir, ec_filename)))
# also test copying of patch file to current directory (without specifying target location)
change_dir(test_working_dir)
args = ['--copy-ec', '--from-pr', '8007', patch_fn]
stdout = self.mocked_main(args)
regex = re.compile(r"%s copied to .*/%s" % (patch_fn, os.path.basename(test_working_dir)))
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
self.assertEqual(sorted(os.listdir(test_working_dir)), sorted([ec_filename, patch_fn]))
self.assertTrue(is_patch_file(os.path.join(test_working_dir, patch_fn)))
change_dir(cwd)
remove_dir(test_working_dir)
# test with only one ec in the PR (final argument is taken as a filename)
test_ec = os.path.join(self.test_prefix, 'test.eb')
args = ['--copy-ec', '--from-pr', '11521', test_ec]
ec_pr11521 = "ExifTool-12.00-GCCcore-9.3.0.eb"
stdout = self.mocked_main(args)
regex = re.compile(r'.*/%s copied to %s' % (ec_pr11521, test_ec))
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
self.assertTrue(os.path.exists(test_ec))
self.assertTrue("name = 'ExifTool'" in read_file(test_ec))
remove_file(test_ec)
def test_dry_run(self):
"""Test dry run (long format)."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'gzip-1.4-GCC-4.6.3.eb',
'--dry-run', # implies enabling dependency resolution
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn)
logtxt = read_file(self.logfile)
info_msg = r"Dry run: printing build status of easyconfigs and dependencies"
self.assertTrue(re.search(info_msg, logtxt, re.M), "Dry run info message should be found in '%s'" % logtxt)
ecs_mods = [
("gzip-1.4-GCC-4.6.3.eb", "gzip/1.4-GCC-4.6.3", ' '),
("GCC-4.6.3.eb", "GCC/4.6.3", 'x'),
]
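# status marks in dry run output (also used in the tests below): 'x' = module already installed,
# ' ' = module still missing, 'F' = forced (re)install of an easyconfig listed on the command line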
for ec, mod, mark in ecs_mods:
regex = re.compile(r" \* \[%s\] \S+%s \(module: %s\)" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(logtxt), "Pattern %s should be found in '%s'" % (regex.pattern, logtxt))
def test_missing(self):
"""Test use of --missing/-M."""
for mns in [None, 'HierarchicalMNS']:
args = ['gzip-1.4-GCC-4.6.3.eb']
if mns == 'HierarchicalMNS':
args.append('--module-naming-scheme=%s' % mns)
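# with a hierarchical naming scheme the pre-installed test modules (which use flat names)
# are not found, so all 4 required modules are expected to be reported as missing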
expected = '\n'.join([
"4 out of 4 required modules missing:",
'',
"* Core | GCC/4.6.3 (GCC-4.6.3.eb)",
"* Core | intel/2018a (intel-2018a.eb)",
"* Core | toy/.0.0-deps (toy-0.0-deps.eb)",
"* Compiler/GCC/4.6.3 | gzip/1.4 (gzip-1.4-GCC-4.6.3.eb)",
'',
])
else:
expected = '\n'.join([
"1 out of 4 required modules missing:",
'',
"* gzip/1.4-GCC-4.6.3 (gzip-1.4-GCC-4.6.3.eb)",
'',
])
for opt in ['-M', '--missing-modules']:
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args + [opt], testing=False, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
self.assertFalse(stderr)
self.assertTrue(expected in stdout, "Pattern '%s' found in: %s" % (expected, stdout))
def test_dry_run_short(self):
"""Test dry run (short format)."""
# unset $EASYBUILD_ROBOT_PATHS that was defined in setUp
del os.environ['EASYBUILD_ROBOT_PATHS']
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
copy_dir(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
robot_decoy = os.path.join(self.test_prefix, 'robot_decoy')
mkdir(robot_decoy)
for dry_run_arg in ['-D', '--dry-run-short']:
write_file(self.logfile, '')
args = [
os.path.join(tmpdir, 'easybuild', 'easyconfigs', 'g', 'gzip', 'gzip-1.4-GCC-4.6.3.eb'),
dry_run_arg,
# purposely specifying senseless dir, to test auto-inclusion of easyconfigs pkg path in robot path
'--robot=%s' % robot_decoy,
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
info_msg = r"Dry run: printing build status of easyconfigs and dependencies"
self.assertTrue(re.search(info_msg, outtxt, re.M), "Dry run info message should be found in '%s'" % outtxt)
self.assertTrue(re.search('CFGS=', outtxt), "CFGS line should be found in '%s'" % outtxt)
ecs_mods = [
("gzip-1.4-GCC-4.6.3.eb", "gzip/1.4-GCC-4.6.3", ' '),
("GCC-4.6.3.eb", "GCC/4.6.3", 'x'),
]
for ec, mod, mark in ecs_mods:
regex = re.compile(r" \* \[%s\] \$CFGS\S+%s \(module: %s\)" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
# cleanup
shutil.rmtree(tmpdir)
sys.path[:] = orig_sys_path
def test_try_robot_force(self):
"""
Test correct behavior for combination of --try-toolchain --robot --force.
Only the listed easyconfigs should be forced, resolved dependencies should not (even if tweaked).
"""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# use toy-0.0.eb easyconfig file that comes with the tests
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
eb1 = os.path.join(test_ecs, 'f', 'FFTW', 'FFTW-3.3.7-gompi-2018a.eb')
eb2 = os.path.join(test_ecs, 's', 'ScaLAPACK', 'ScaLAPACK-2.0.2-gompi-2018a-OpenBLAS-0.2.20.eb')
# check log message with --skip for existing module
args = [
eb1,
eb2,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
'--robot=%s' % test_ecs,
'--try-toolchain=gompi,2018b',
'--dry-run',
'--unittest-file=%s' % self.logfile,
]
outtxt = self.eb_main(args, logfile=dummylogfn)
scalapack_ver = '2.0.2-gompi-2018b-OpenBLAS-0.2.20'
ecs_mods = [
# GCC/OpenMPI dependencies are there, but part of toolchain => 'x'
("GCC-7.3.0-2.30.eb", "GCC/7.3.0-2.30", 'x'),
("OpenMPI-3.1.1-GCC-7.3.0-2.30.eb", "OpenMPI/3.1.1-GCC-7.3.0-2.30", 'x'),
# toolchain used for OpenBLAS is mapped to GCC/7.3.0-2.30 subtoolchain in gompi/2018b
# (rather than the original GCC/6.4.0-2.28 as subtoolchain of gompi/2018a)
("OpenBLAS-0.2.20-GCC-7.3.0-2.30.eb", "OpenBLAS/0.2.20-GCC-7.3.0-2.30", 'x'),
# both FFTW and ScaLAPACK are listed => 'F'
("ScaLAPACK-%s.eb" % scalapack_ver, "ScaLAPACK/%s" % scalapack_ver, 'F'),
("FFTW-3.3.7-gompi-2018b.eb", "FFTW/3.3.7-gompi-2018b", 'F'),
]
for ec, mod, mark in ecs_mods:
regex = re.compile(r"^ \* \[%s\] \S+%s \(module: %s\)$" % (mark, ec, mod), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in '%s'" % (regex.pattern, outtxt))
def test_try_toolchain_mapping(self):
"""Test mapping of subtoolchains with --try-toolchain."""
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
gzip_ec = os.path.join(test_ecs, 'g', 'gzip', 'gzip-1.5-foss-2018a.eb')
args = [
gzip_ec,
'--try-toolchain=iccifort,2016.1.150-GCC-4.9.3-2.25',
'--dry-run',
]
# by default, toolchain mapping is enabled
# if it fails, an error is printed
error_pattern = "Toolchain iccifort is not equivalent to toolchain foss in terms of capabilities."
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True, do_build=True)
# can continue anyway using --disable-map-toolchains
args.append('--disable-map-toolchains')
outtxt = self.eb_main(args, raise_error=True, do_build=True)
patterns = [
r"^ \* \[ \] .*/iccifort-2016.1.150-GCC-4.9.3-2.25.eb \(module: iccifort/.*\)$",
r"^ \* \[ \] .*/gzip-1.5-iccifort-2016.1.150-GCC-4.9.3-2.25.eb \(module: gzip/1.5-iccifort.*\)$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(outtxt), "Pattern '%s' found in: %s" % (regex.pattern, outtxt))
anti_patterns = [
r"^ \* \[.\] .*-foss-2018a",
r"^ \* \[.\] .*-gompi-2018a",
r"^ \* \[.\] .*-GCC.*6\.4\.0",
]
for anti_pattern in anti_patterns:
regex = re.compile(anti_pattern, re.M)
self.assertFalse(regex.search(outtxt), "Pattern '%s' NOT found in: %s" % (regex.pattern, outtxt))
def test_try_update_deps(self):
"""Test for --try-update-deps."""
# first, construct a toy easyconfig that is well suited for testing (multiple deps)
test_ectxt = '\n'.join([
"easyblock = 'ConfigureMake'",
'',
"name = 'test'",
"version = '1.2.3'",
'',
"homepage = 'https://test.org'",
"description = 'this is just a test'",
'',
"toolchain = {'name': 'GCC', 'version': '4.9.3-2.26'}",
'',
"builddependencies = [('gzip', '1.4')]",
"dependencies = [('hwloc', '1.6.2')]",
])
test_ec = os.path.join(self.test_prefix, 'test.eb')
write_file(test_ec, test_ectxt)
args = [
test_ec,
'--try-toolchain-version=6.4.0-2.28',
'--try-update-deps',
'-D',
]
self.assertErrorRegex(EasyBuildError, "Experimental functionality", self.eb_main, args, raise_error=True)
args.append('--experimental')
outtxt = self.eb_main(args, raise_error=True, do_build=True)
patterns = [
# toolchain got updated
r"^ \* \[x\] .*/test_ecs/g/GCC/GCC-6.4.0-2.28.eb \(module: GCC/6.4.0-2.28\)$",
# no version update for gzip (because there's no gzip easyconfig using GCC/6.4.0-2.28 (sub)toolchain)
r"^ \* \[ \] .*/tweaked_dep_easyconfigs/gzip-1.4-GCC-6.4.0-2.28.eb \(module: gzip/1.4-GCC-6.4.0-2.28\)$",
# hwloc was updated to 1.11.8, thanks to available easyconfig
r"^ \* \[x\] .*/test_ecs/h/hwloc/hwloc-1.11.8-GCC-6.4.0-2.28.eb \(module: hwloc/1.11.8-GCC-6.4.0-2.28\)$",
# also generated easyconfig for test/1.2.3 with expected toolchain
r"^ \* \[ \] .*/tweaked_easyconfigs/test-1.2.3-GCC-6.4.0-2.28.eb \(module: test/1.2.3-GCC-6.4.0-2.28\)$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(outtxt), "Pattern '%s' should be found in: %s" % (regex.pattern, outtxt))
# construct another toy easyconfig that is well suited for testing ignoring versionsuffix
test_ectxt = '\n'.join([
"easyblock = 'ConfigureMake'",
'',
"name = 'test'",
"version = '1.2.3'",
'',
"homepage = 'https://test.org'",
"description = 'this is just a test'",
'',
"toolchain = {'name': 'GCC', 'version': '4.8.2'}",
'',
"dependencies = [('OpenBLAS', '0.2.8', '-LAPACK-3.4.2')]",
])
write_file(test_ec, test_ectxt)
self.mock_stderr(True)
outtxt = self.eb_main(args, raise_error=True, do_build=True)
errtxt = self.get_stderr()
warning_stub = "\nWARNING: There may be newer version(s) of dep 'OpenBLAS' available with a different " \
"versionsuffix to '-LAPACK-3.4.2'"
self.mock_stderr(False)
self.assertTrue(warning_stub in errtxt)
patterns = [
# toolchain got updated
r"^ \* \[x\] .*/test_ecs/g/GCC/GCC-6.4.0-2.28.eb \(module: GCC/6.4.0-2.28\)$",
# no version update for OpenBLAS (because there's no ec with matching '-LAPACK-3.4.2' versionsuffix for GCC/6.4.0-2.28)
r"^ \* \[ \] .*/tweaked_dep_easyconfigs/OpenBLAS-0.2.8-GCC-6.4.0-2.28-LAPACK-3.4.2.eb "
r"\(module: OpenBLAS/0.2.8-GCC-6.4.0-2.28-LAPACK-3.4.2\)$",
# also generated easyconfig for test/1.2.3 with expected toolchain
r"^ \* \[ \] .*/tweaked_easyconfigs/test-1.2.3-GCC-6.4.0-2.28.eb \(module: test/1.2.3-GCC-6.4.0-2.28\)$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(outtxt), "Pattern '%s' should be found in: %s" % (regex.pattern, outtxt))
# Now verify that we can ignore versionsuffixes
args.append('--try-ignore-versionsuffixes')
outtxt = self.eb_main(args, raise_error=True, do_build=True)
patterns = [
# toolchain got updated
r"^ \* \[x\] .*/test_ecs/g/GCC/GCC-6.4.0-2.28.eb \(module: GCC/6.4.0-2.28\)$",
# OpenBLAS is updated to 0.2.20 now that the versionsuffix is ignored (matching ec available for GCC/6.4.0-2.28)
r"^ \* \[x\] .*/test_ecs/o/OpenBLAS/OpenBLAS-0.2.20-GCC-6.4.0-2.28.eb "
r"\(module: OpenBLAS/0.2.20-GCC-6.4.0-2.28\)$",
# also generated easyconfig for test/1.2.3 with expected toolchain
r"^ \* \[ \] .*/tweaked_easyconfigs/test-1.2.3-GCC-6.4.0-2.28.eb \(module: test/1.2.3-GCC-6.4.0-2.28\)$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(outtxt), "Pattern '%s' should be found in: %s" % (regex.pattern, outtxt))
def test_dry_run_hierarchical(self):
"""Test dry run using a hierarchical module naming scheme."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'gzip-1.5-foss-2018a.eb',
'OpenMPI-2.1.2-GCC-6.4.0-2.28.eb',
'--dry-run',
'--unittest-file=%s' % self.logfile,
'--module-naming-scheme=HierarchicalMNS',
'--ignore-osdeps',
'--force',
'--debug',
]
outtxt = self.eb_main(args, logfile=dummylogfn, verbose=True, raise_error=True)
ecs_mods = [
# easyconfig, module subdir, (short) module name
("GCC-6.4.0-2.28.eb", "Core", "GCC/6.4.0-2.28", 'x'), # already present but not listed, so 'x'
("hwloc-1.11.8-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28", "hwloc/1.11.8", 'x'),
# already present and listed, so 'F'
("OpenMPI-2.1.2-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28", "OpenMPI/2.1.2", 'F'),
("gompi-2018a.eb", "Core", "gompi/2018a", 'x'),
("OpenBLAS-0.2.20-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28", "OpenBLAS/0.2.20", ' '),
("FFTW-3.3.7-gompi-2018a.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2", "FFTW/3.3.7", 'x'),
("ScaLAPACK-2.0.2-gompi-2018a-OpenBLAS-0.2.20.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2",
"ScaLAPACK/2.0.2-OpenBLAS-0.2.20", 'x'),
("foss-2018a.eb", "Core", "foss/2018a", 'x'),
# listed but not there: ' '
("gzip-1.5-foss-2018a.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2", "gzip/1.5", ' '),
]
for ec, mod_subdir, mod_name, mark in ecs_mods:
regex = re.compile(r"^ \* \[%s\] \S+%s \(module: %s \| %s\)$" % (mark, ec, mod_subdir, mod_name), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_dry_run_categorized(self):
"""Test dry run using a categorized hierarchical module naming scheme."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
self.setup_categorized_hmns_modules()
args = [
'gzip-1.5-foss-2018a.eb',
'OpenMPI-2.1.2-GCC-6.4.0-2.28.eb',
'--dry-run',
'--unittest-file=%s' % self.logfile,
'--module-naming-scheme=CategorizedHMNS',
'--ignore-osdeps',
'--force',
'--debug',
]
outtxt = self.eb_main(args, logfile=dummylogfn, verbose=True, raise_error=True)
ecs_mods = [
# easyconfig, module subdir, (short) module name, mark
("GCC-6.4.0-2.28.eb", "Core/compiler", "GCC/6.4.0-2.28", 'x'), # already present but not listed, so 'x'
("hwloc-1.11.8-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28/system", "hwloc/1.11.8", 'x'),
# already present and listed, so 'F'
("OpenMPI-2.1.2-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28/mpi", "OpenMPI/2.1.2", 'F'),
("gompi-2018a.eb", "Core/toolchain", "gompi/2018a", 'x'),
("OpenBLAS-0.2.20-GCC-6.4.0-2.28.eb", "Compiler/GCC/6.4.0-2.28/numlib",
"OpenBLAS/0.2.20", 'x'),
("FFTW-3.3.7-gompi-2018a.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2/numlib", "FFTW/3.3.7", 'x'),
("ScaLAPACK-2.0.2-gompi-2018a-OpenBLAS-0.2.20.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2/numlib",
"ScaLAPACK/2.0.2-OpenBLAS-0.2.20", 'x'),
("foss-2018a.eb", "Core/toolchain", "foss/2018a", 'x'),
# listed but not there: ' '
("gzip-1.5-foss-2018a.eb", "MPI/GCC/6.4.0-2.28/OpenMPI/2.1.2/tools", "gzip/1.5", ' '),
]
for ec, mod_subdir, mod_name, mark in ecs_mods:
regex = re.compile(r"^ \* \[%s\] \S+%s \(module: %s \| %s\)$" % (mark, ec, mod_subdir, mod_name), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in '%s'" % (regex.pattern, outtxt))
if os.path.exists(dummylogfn):
os.remove(dummylogfn)
def test_github_from_pr(self):
"""Test fetching easyconfigs from a PR."""
if self.github_token is None:
print("Skipping test_from_pr, no GitHub token available?")
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
tmpdir = tempfile.mkdtemp()
args = [
# PR for foss/2018b, see https://github.com/easybuilders/easybuild-easyconfigs/pull/6424/files
'--from-pr=6424',
'--dry-run',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % tmpdir,
]
try:
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
modules = [
(tmpdir, 'FFTW/3.3.8-gompi-2018b'),
(tmpdir, 'foss/2018b'),
('.*', 'GCC/7.3.0-2.30'), # not included in PR
(tmpdir, 'gompi/2018b'),
(tmpdir, 'HPL/2.2-foss-2018b'),
('.*', 'hwloc/1.11.8-GCC-7.3.0-2.30'),
('.*', 'OpenBLAS/0.3.1-GCC-7.3.0-2.30'),
('.*', 'OpenMPI/3.1.1-GCC-7.3.0-2.30'),
(tmpdir, 'ScaLAPACK/2.0.2-gompi-2018b-OpenBLAS-0.3.1'),
]
for path_prefix, module in modules:
ec_fn = "%s.eb" % '-'.join(module.split('/'))
path = '.*%s' % os.path.dirname(path_prefix)
regex = re.compile(r"^ \* \[.\] %s.*%s \(module: %s\)$" % (path, ec_fn, module), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in %s" % (regex.pattern, outtxt))
# make sure that *only* these modules are listed, no others
regex = re.compile(r"^ \* \[.\] .*/(?P<filepath>.*) \(module: (?P<module>.*)\)$", re.M)
self.assertTrue(sorted(regex.findall(outtxt)), sorted(modules))
pr_tmpdir = os.path.join(tmpdir, r'eb-\S{6,8}', 'files_pr6424')
regex = re.compile(r"Extended list of robot search paths with \['%s'\]:" % pr_tmpdir, re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in %s" % (regex.pattern, outtxt))
except URLError as err:
print("Ignoring URLError '%s' in test_from_pr" % err)
shutil.rmtree(tmpdir)
# test with multiple prs
tmpdir = tempfile.mkdtemp()
args = [
# PRs for ReFrame 3.4.1 and 3.5.0
'--from-pr=12150,12366',
'--dry-run',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % tmpdir,
]
try:
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
modules = [
(tmpdir, 'ReFrame/3.4.1'),
(tmpdir, 'ReFrame/3.5.0'),
]
for path_prefix, module in modules:
ec_fn = "%s.eb" % '-'.join(module.split('/'))
path = '.*%s' % os.path.dirname(path_prefix)
regex = re.compile(r"^ \* \[.\] %s.*%s \(module: %s\)$" % (path, ec_fn, module), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in %s" % (regex.pattern, outtxt))
# make sure that *only* these modules are listed, no others
regex = re.compile(r"^ \* \[.\] .*/(?P<filepath>.*) \(module: (?P<module>.*)\)$", re.M)
self.assertTrue(sorted(regex.findall(outtxt)), sorted(modules))
for pr in ('12150', '12366'):
pr_tmpdir = os.path.join(tmpdir, r'eb-\S{6,8}', 'files_pr%s' % pr)
regex = re.compile(r"Extended list of robot search paths with .*%s.*:" % pr_tmpdir, re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in %s" % (regex.pattern, outtxt))
except URLError as err:
print("Ignoring URLError '%s' in test_from_pr" % err)
shutil.rmtree(tmpdir)
def test_github_from_pr_token_log(self):
"""Check that --from-pr doesn't leak GitHub token in log."""
if self.github_token is None:
print("Skipping test_from_pr_token_log, no GitHub token available?")
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
# PR for foss/2018b, see https://github.com/easybuilders/easybuild-easyconfigs/pull/6424/files
'--from-pr=6424',
'--dry-run',
'--debug',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % os.path.join(os.path.dirname(__file__), 'easyconfigs'),
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
]
try:
self.mock_stdout(True)
self.mock_stderr(True)
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
stdout = self.get_stdout()
stderr = self.get_stderr()
self.mock_stdout(False)
self.mock_stderr(False)
self.assertFalse(self.github_token in outtxt)
self.assertFalse(self.github_token in stdout)
self.assertFalse(self.github_token in stderr)
except URLError as err:
print("Ignoring URLError '%s' in test_from_pr" % err)
def test_github_from_pr_listed_ecs(self):
"""Test --from-pr in combination with specifying easyconfigs on the command line."""
if self.github_token is None:
print("Skipping test_from_pr, no GitHub token available?")
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
ecstmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(ecstmpdir, 'easybuild'), parents=True)
copy_dir(test_ecs_path, os.path.join(ecstmpdir, 'easybuild', 'easyconfigs'))
# inject path to test easyconfigs into head of Python search path
sys.path.insert(0, ecstmpdir)
tmpdir = tempfile.mkdtemp()
args = [
'toy-0.0.eb',
'gompi-2018b.eb', # also pulls in GCC, OpenMPI (which pulls in hwloc)
'GCC-4.6.3.eb',
# PR for foss/2018b, see https://github.com/easybuilders/easybuild-easyconfigs/pull/6424/files
'--from-pr=6424',
'--dry-run',
# an argument must be specified to --robot, since easybuild-easyconfigs may not be installed
'--robot=%s' % test_ecs_path,
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % tmpdir,
]
try:
outtxt = self.eb_main(args, logfile=dummylogfn, raise_error=True)
modules = [
(test_ecs_path, 'toy/0.0'), # not included in PR
(test_ecs_path, 'GCC/7.3.0-2.30'), # not included in PR, available locally
(test_ecs_path, 'hwloc/1.11.8-GCC-7.3.0-2.30'),
(test_ecs_path, 'OpenMPI/3.1.1-GCC-7.3.0-2.30'),
('.*%s' % os.path.dirname(tmpdir), 'gompi/2018b'),
(test_ecs_path, 'GCC/4.6.3'), # not included in PR, available locally
]
for path_prefix, module in modules:
ec_fn = "%s.eb" % '-'.join(module.split('/'))
regex = re.compile(r"^ \* \[.\] %s.*%s \(module: %s\)$" % (path_prefix, ec_fn, module), re.M)
self.assertTrue(regex.search(outtxt), "Pattern %s should be found in %s" % (regex.pattern, outtxt))
# make sure that *only* these modules are listed, no others
regex = re.compile(r"^ \* \[.\] .*/(?P<filepath>.*) \(module: (?P<module>.*)\)$", re.M)
self.assertTrue(sorted(regex.findall(outtxt)), sorted(modules))
except URLError as err:
print("Ignoring URLError '%s' in test_from_pr" % err)
shutil.rmtree(tmpdir)
def test_github_from_pr_x(self):
"""Test combination of --from-pr with --extended-dry-run."""
if self.github_token is None:
print("Skipping test_from_pr_x, no GitHub token available?")
return
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
# PR for foss/2018b, see https://github.com/easybuilders/easybuild-easyconfigs/pull/6424/files
'--from-pr=6424',
'FFTW-3.3.8-gompi-2018b.eb',
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # a GitHub token should be available for this user
'--tmpdir=%s' % self.test_prefix,
'--extended-dry-run',
]
try:
# PR #6424 includes easyconfigs that use 'dummy' toolchain,
# so we need to allow triggering deprecated behaviour
self.allow_deprecated_behaviour()
self.mock_stdout(True)
self.mock_stderr(True)  # just to capture deprecation warning
self.eb_main(args, do_build=True, raise_error=True, testing=False)
stdout = self.get_stdout()
self.mock_stdout(False)
self.mock_stderr(False)
msg_regexs = [
re.compile(r"^== Build succeeded for 1 out of 1", re.M),
re.compile(r"^\*\*\* DRY RUN using 'EB_FFTW' easyblock", re.M),
re.compile(r"^== building and installing FFTW/3.3.8-gompi-2018b\.\.\.", re.M),
re.compile(r"^building... \[DRY RUN\]", re.M),
re.compile(r"^== COMPLETED: Installation ended successfully \(took .* secs?\)", re.M),
]
for msg_regex in msg_regexs:
self.assertTrue(msg_regex.search(stdout), "Pattern '%s' found in: %s" % (msg_regex.pattern, stdout))
except URLError as err:
print("Ignoring URLError '%s' in test_from_pr_x" % err)
def test_no_such_software(self):
"""Test using no arguments."""
args = [
'--software-name=nosuchsoftware',
'--robot=.',
'--debug',
]
outtxt = self.eb_main(args)
# error message when template is not found
error_msg1 = "ERROR.* No easyconfig files found for software nosuchsoftware, and no templates available. "
error_msg1 += "I'm all out of ideas."
# error message when template is found
error_msg2 = "ERROR Unable to find an easyconfig for the given specifications"
regex = re.compile("(%s|%s)" % (error_msg1, error_msg2))
self.assertTrue(regex.search(outtxt), "Pattern '%s' found in: %s" % (regex.pattern, outtxt))
def test_header_footer(self):
"""Test specifying a module header/footer."""
# create file containing modules footer
if get_module_syntax() == 'Tcl':
modules_header_txt = '\n'.join([
"# test header",
"setenv SITE_SPECIFIC_HEADER_ENV_VAR foo",
])
modules_footer_txt = '\n'.join([
"# test footer",
"setenv SITE_SPECIFIC_FOOTER_ENV_VAR bar",
])
elif get_module_syntax() == 'Lua':
modules_header_txt = '\n'.join([
"-- test header",
'setenv("SITE_SPECIFIC_HEADER_ENV_VAR", "foo")',
])
modules_footer_txt = '\n'.join([
"-- test footer",
'setenv("SITE_SPECIFIC_FOOTER_ENV_VAR", "bar")',
])
else:
self.assertTrue(False, "Unknown module syntax: %s" % get_module_syntax())
# dump header/footer text to file
handle, modules_footer = tempfile.mkstemp(prefix='modules-footer-')
os.close(handle)
write_file(modules_footer, modules_footer_txt)
handle, modules_header = tempfile.mkstemp(prefix='modules-header-')
os.close(handle)
write_file(modules_header, modules_header_txt)
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# check log message with --skip for existing module
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
'--modules-header=%s' % modules_header,
'--modules-footer=%s' % modules_footer,
]
self.eb_main(args, do_build=True, raise_error=True)
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0')
if get_module_syntax() == 'Lua':
toy_module += '.lua'
toy_module_txt = read_file(toy_module)
regex = re.compile(r'%s$' % modules_header_txt.replace('(', '\\(').replace(')', '\\)'), re.M)
msg = "modules header '%s' is present in '%s'" % (modules_header_txt, toy_module_txt)
self.assertTrue(regex.search(toy_module_txt), msg)
regex = re.compile(r'%s$' % modules_footer_txt.replace('(', '\\(').replace(')', '\\)'), re.M)
msg = "modules footer '%s' is present in '%s'" % (modules_footer_txt, toy_module_txt)
self.assertTrue(regex.search(toy_module_txt), msg)
# cleanup
os.remove(modules_footer)
os.remove(modules_header)
def test_recursive_module_unload(self):
"""Test generating recursively unloading modules."""
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-deps.eb')
# check log message with --skip for existing module
lastargs = ['--recursive-module-unload']
if self.modtool.supports_depends_on:
lastargs.append('--module-depends-on')
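# with --recursive-module-unload (or --module-depends-on), dependencies are loaded
# unconditionally, so the generated module file should not contain an is-loaded guard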
for lastarg in lastargs:
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--force',
lastarg,
]
self.eb_main(args, do_build=True, verbose=True)
toy_module = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0-deps')
if get_module_syntax() == 'Lua':
toy_module += '.lua'
is_loaded_regex = re.compile(r'if not \( isloaded\("gompi/2018a"\) \)', re.M)
else:
# Tcl syntax
is_loaded_regex = re.compile(r"if { !\[is-loaded gompi/2018a\] }", re.M)
toy_module_txt = read_file(toy_module)
self.assertFalse(is_loaded_regex.search(toy_module_txt), "No is-loaded guard expected when recursive unloading is used: %s" % toy_module_txt)
def test_tmpdir(self):
"""Test setting temporary directory to use by EasyBuild."""
# use temporary paths for build/install paths, make sure sources can be found
tmpdir = tempfile.mkdtemp()
# use toy-0.0.eb easyconfig file that comes with the tests
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# check log message with --skip for existing module
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
'--tmpdir=%s' % tmpdir,
]
outtxt = self.eb_main(args, do_build=True, reset_env=False)
tmpdir_msg = r"Using %s\S+ as temporary directory" % os.path.join(tmpdir, 'eb-')
found = re.search(tmpdir_msg, outtxt, re.M)
self.assertTrue(found, "Log message about tmpdir should be found in outtxt: %s" % outtxt)
for var in ['TMPDIR', 'TEMP', 'TMP']:
self.assertTrue(os.environ[var].startswith(os.path.join(tmpdir, 'eb-')))
self.assertTrue(tempfile.gettempdir().startswith(os.path.join(tmpdir, 'eb-')))
tempfile_tmpdir = tempfile.mkdtemp()
self.assertTrue(tempfile_tmpdir.startswith(os.path.join(tmpdir, 'eb-')))
fd, tempfile_tmpfile = tempfile.mkstemp()
self.assertTrue(tempfile_tmpfile.startswith(os.path.join(tmpdir, 'eb-')))
# cleanup
os.close(fd)
shutil.rmtree(tmpdir)
def test_ignore_osdeps(self):
"""Test ignoring of listed OS dependencies."""
txt = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = SYSTEM',
'osdependencies = ["nosuchosdependency", ("nosuchdep_option1", "nosuchdep_option2")]',
])
fd, eb_file = tempfile.mkstemp(prefix='easyconfig_test_file_', suffix='.eb')
os.close(fd)
write_file(eb_file, txt)
# check whether non-existing OS dependencies result in failure, by default
args = [
eb_file,
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("Checking OS dependencies")
self.assertTrue(regex.search(outtxt), "OS dependencies are checked, outtxt: %s" % outtxt)
msg = "One or more OS dependencies were not found: "
msg += r"\[\('nosuchosdependency',\), \('nosuchdep_option1', 'nosuchdep_option2'\)\]"
regex = re.compile(r'%s' % msg, re.M)
self.assertTrue(regex.search(outtxt), "OS dependencies are honored, outtxt: %s" % outtxt)
# check whether OS dependencies are effectively ignored
args = [
eb_file,
'--ignore-osdeps',
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("Not checking OS dependencies", re.M)
self.assertTrue(regex.search(outtxt), "OS dependencies are ignored with --ignore-osdeps, outtxt: %s" % outtxt)
txt += "\nstop = 'notavalidstop'"
write_file(eb_file, txt)
args = [
eb_file,
'--dry-run', # no explicit --ignore-osdeps, but implied by --dry-run
]
outtxt = self.eb_main(args, do_build=True)
regex = re.compile("stop provided 'notavalidstop' is not valid", re.M)
self.assertTrue(regex.search(outtxt), "Validations are performed with --ignore-osdeps, outtxt: %s" % outtxt)
def test_experimental(self):
"""Test the experimental option"""
orig_value = easybuild.tools.build_log.EXPERIMENTAL
# make sure it's off by default
self.assertFalse(orig_value)
log = fancylogger.getLogger()
# force it to False
EasyBuildOptions(
go_args=['--disable-experimental'],
)
try:
log.experimental('x')
# sanity check, should never be reached if it works.
self.assertTrue(False, "Experimental logging should be disabled by setting --disable-experimental option")
except easybuild.tools.build_log.EasyBuildError as err:
# check error message
self.assertTrue('Experimental functionality.' in str(err))
# toggle experimental
EasyBuildOptions(
go_args=['--experimental'],
)
try:
log.experimental('x')
except easybuild.tools.build_log.EasyBuildError as err:
self.assertTrue(False, "Experimental logging should be allowed by the --experimental option: %s" % err)
# set it back
easybuild.tools.build_log.EXPERIMENTAL = orig_value
def test_deprecated(self):
"""Test the deprecated option"""
if 'EASYBUILD_DEPRECATED' in os.environ:
os.environ['EASYBUILD_DEPRECATED'] = str(VERSION)
init_config()
orig_value = easybuild.tools.build_log.CURRENT_VERSION
# make sure it's off by default
self.assertEqual(orig_value, VERSION)
log = fancylogger.getLogger()
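# log.deprecated(msg, ver) only logs a warning as long as the configured deprecation version
# (set via --deprecated) is lower than ver; once it is equal or higher, an EasyBuildError is
# raised -- the three scenarios below cover both cases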
# force it to lower version using 0.x, which should not result in any raised error (only deprecation logging)
EasyBuildOptions(
go_args=['--deprecated=0.%s' % orig_value],
)
stderr = None
try:
self.mock_stderr(True)
log.deprecated('x', str(orig_value))
stderr = self.get_stderr()
self.mock_stderr(False)
except easybuild.tools.build_log.EasyBuildError as err:
self.assertTrue(False, "Deprecated logging should work: %s" % err)
stderr_regex = re.compile("^\nWARNING: Deprecated functionality, will no longer work in")
self.assertTrue(stderr_regex.search(stderr), "Pattern '%s' found in: %s" % (stderr_regex.pattern, stderr))
# force it to current version, which should result in deprecation
EasyBuildOptions(
go_args=['--deprecated=%s' % orig_value],
)
try:
log.deprecated('x', str(orig_value))
# not supposed to get here
self.assertTrue(False, 'Deprecated logging should throw EasyBuildError')
except easybuild.tools.build_log.EasyBuildError as err2:
self.assertTrue('DEPRECATED' in str(err2))
# force higher version by prefixing it with 1, which should result in deprecation errors
EasyBuildOptions(
go_args=['--deprecated=1%s' % orig_value],
)
try:
log.deprecated('x', str(orig_value))
# not supposed to get here
self.assertTrue(False, 'Deprecated logging should throw EasyBuildError')
except easybuild.tools.build_log.EasyBuildError as err3:
self.assertTrue('DEPRECATED' in str(err3))
# set it back
easybuild.tools.build_log.CURRENT_VERSION = orig_value
def test_allow_modules_tool_mismatch(self):
"""Test allowing mismatch of modules tool with 'module' function."""
# make sure MockModulesTool is available
from test.framework.modulestool import MockModulesTool # noqa
# trigger that main() creates new instance of ModulesTool
self.modtool = None
topdir = os.path.abspath(os.path.dirname(__file__))
ec_file = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# keep track of original module definition so we can restore it
orig_module = os.environ.get('module', None)
# check whether mismatch between 'module' function and selected modules tool is detected
os.environ['module'] = "() { eval `/Users/kehoste/Modules/$MODULE_VERSION/bin/modulecmd bash $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
error_regex = re.compile("ERROR .*pattern .* not found in defined 'module' function")
self.assertTrue(error_regex.search(outtxt), "Error w.r.t. module function mismatch should be found: %s" % outtxt[-600:])
# check that --allow-modules-tool-mismatch transforms this error into a warning
os.environ['module'] = "() { eval `/Users/kehoste/Modules/$MODULE_VERSION/bin/modulecmd bash $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
'--allow-modules-tool-mismatch',
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
warn_regex = re.compile("WARNING .*pattern .* not found in defined 'module' function")
self.assertTrue(warn_regex.search(outtxt), "Warning w.r.t. module function mismatch should be found: %s" % outtxt[-600:])
# check whether match between 'module' function and selected modules tool is detected
os.environ['module'] = "() { eval ` /bin/echo $*`\n}"
args = [
ec_file,
'--modules-tool=MockModulesTool',
'--module-syntax=Tcl', # Lua would require Lmod
'--debug',
]
self.eb_main(args, do_build=True)
outtxt = read_file(self.logfile)
found_regex = re.compile("DEBUG Found pattern .* in defined 'module' function")
self.assertTrue(found_regex.search(outtxt), "Debug message w.r.t. module function should be found: %s" % outtxt[-600:])
# restore 'module' function
if orig_module is not None:
os.environ['module'] = orig_module
else:
del os.environ['module']
def test_try(self):
"""Test whether --try options are taken into account."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
tweaked_toy_ec = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
copy_file(os.path.join(ecs_path, 't', 'toy', 'toy-0.0.eb'), tweaked_toy_ec)
write_file(tweaked_toy_ec, "easyblock = 'ConfigureMake'", append=True)
args = [
tweaked_toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--dry-run',
'--robot=%s' % ecs_path,
]
test_cases = [
([], 'toy/0.0'),
# try-* only uses the subtoolchain with matching necessary features
(['--try-software=foo,1.2.3', '--try-toolchain=gompi,2018a'], 'foo/1.2.3-GCC-6.4.0-2.28'),
(['--try-toolchain-name=gompi', '--try-toolchain-version=2018a'], 'toy/0.0-GCC-6.4.0.2.28'),
# --try-toolchain is overridden by --toolchain
(['--try-toolchain=gompi,2018a', '--toolchain=system,system'], 'toy/0.0'),
# check we interpret SYSTEM correctly as a toolchain
(['--try-toolchain=SYSTEM'], 'toy/0.0'),
(['--toolchain=SYSTEM'], 'toy/0.0'),
(['--try-software-name=foo', '--try-software-version=1.2.3'], 'foo/1.2.3'),
(['--try-toolchain-name=gompi', '--try-toolchain-version=2018a'], 'toy/0.0-GCC-6.4.0.2.28'),
(['--try-software-version=1.2.3', '--try-toolchain=gompi,2018a'], 'toy/1.2.3-GCC-6.4.0.2.28'),
(['--try-amend=versionsuffix=-test'], 'toy/0.0-test'),
# --try-amend is overridden by --amend
(['--amend=versionsuffix=', '--try-amend=versionsuffix=-test'], 'toy/0.0'),
(['--try-toolchain=gompi,2018a', '--toolchain=system,system'], 'toy/0.0'),
# tweak existing list-typed value (patches)
(['--try-amend=versionsuffix=-test2', '--try-amend=patches=1.patch,2.patch'], 'toy/0.0-test2'),
# append to existing list-typed value (patches)
(['--try-amend=versionsuffix=-test3', '--try-amend=patches=,extra.patch'], 'toy/0.0-test3'),
# prepend to existing list-typed value (patches)
(['--try-amend=versionsuffix=-test4', '--try-amend=patches=extra.patch,'], 'toy/0.0-test4'),
# define extra list-typed parameter
(['--try-amend=versionsuffix=-test5', '--try-amend=exts_list=1,2,3'], 'toy/0.0-test5'),
# only --try causes other build specs to be included too
(['--try-software=foo,1.2.3', '--toolchain=gompi,2018a'], 'foo/1.2.3-GCC-6.4.0-2.28'),
(['--software=foo,1.2.3', '--try-toolchain=gompi,2018a'], 'foo/1.2.3-GCC-6.4.0-2.28'),
(['--software=foo,1.2.3', '--try-amend=versionsuffix=-test'], 'foo/1.2.3-test'),
]
for extra_args, mod in test_cases:
outtxt = self.eb_main(args + extra_args, verbose=True, raise_error=True)
mod_regex = re.compile(r"\(module: %s\)$" % mod, re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
for extra_arg in ['--try-software=foo', '--try-toolchain=gompi', '--try-toolchain=gomp,2018a,-a-suffix']:
allargs = args + [extra_arg]
self.assertErrorRegex(EasyBuildError, "problems validating the options",
self.eb_main, allargs, raise_error=True)
# no --try used, so no tweaked easyconfig files are generated
allargs = args + ['--software-version=1.2.3', '--toolchain=gompi,2018a']
self.assertErrorRegex(EasyBuildError, "version .* not available", self.eb_main, allargs, raise_error=True)
def test_try_with_copy(self):
"""Test whether --try options are taken into account."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
tweaked_toy_ec = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
copy_file(os.path.join(ecs_path, 't', 'toy', 'toy-0.0.eb'), tweaked_toy_ec)
write_file(tweaked_toy_ec, "easyblock = 'ConfigureMake'", append=True)
args = [
tweaked_toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--dry-run',
'--robot=%s' % ecs_path,
'--copy-ec',
]
self.mock_stdout(True)
self.mock_stderr(True)
copied_ec = os.path.join(self.test_buildpath, 'my_eb.eb')
self.eb_main(args + [copied_ec], verbose=True, raise_error=True)
outtxt = self.get_stdout()
errtxt = self.get_stderr()
self.assertTrue(r'toy-0.0-tweaked.eb copied to ' + copied_ec in outtxt)
self.assertFalse(errtxt)
self.mock_stdout(False)
self.mock_stderr(False)
self.assertTrue(os.path.exists(copied_ec))
self.mock_stdout(True)
self.mock_stderr(True)
tweaked_ecs_dir = os.path.join(self.test_buildpath, 'my_tweaked_ecs')
self.eb_main(args + ['--try-software=foo,1.2.3', '--try-toolchain=gompi,2018a', tweaked_ecs_dir],
verbose=True, raise_error=True)
outtxt = self.get_stdout()
errtxt = self.get_stderr()
self.assertTrue(r'1 file(s) copied to ' + tweaked_ecs_dir in outtxt)
self.assertFalse(errtxt)
self.mock_stdout(False)
self.mock_stderr(False)
self.assertTrue(
os.path.exists(os.path.join(self.test_buildpath, tweaked_ecs_dir, 'foo-1.2.3-GCC-6.4.0-2.28.eb'))
)
def test_software_version_ordering(self):
"""Test whether software versions are correctly ordered when using --software."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
gcc_ec = os.path.join(ecs_path, 'g', 'GCC', 'GCC-4.9.2.eb')
test_gcc_ec = os.path.join(self.test_prefix, 'GCC-4.10.1.eb')
test_gcc_txt = read_file(gcc_ec).replace("version = '4.9.2'", "version = '4.10.1'")
write_file(test_gcc_ec, test_gcc_txt)
args = [
'--software=GCC,4.10.1',
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--dry-run',
'--robot=%s:%s' % (ecs_path, self.test_prefix),
]
out = self.eb_main(['--software=GCC,4.10.1'] + args[1:], raise_error=True)
regex = re.compile(r"GCC-4.10.1.eb \(module: GCC/4.10.1\)$", re.M)
self.assertTrue(regex.search(out), "Pattern '%s' found in: %s" % (regex.pattern, out))
def test_recursive_try(self):
"""Test whether recursive --try-X works."""
ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
tweaked_toy_ec = os.path.join(self.test_buildpath, 'toy-0.0-tweaked.eb')
copy_file(os.path.join(ecs_path, 't', 'toy', 'toy-0.0.eb'), tweaked_toy_ec)
write_file(tweaked_toy_ec, "dependencies = [('gzip', '1.4')]\n", append=True) # add fictious dependency
sourcepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox', 'sources')
args = [
tweaked_toy_ec,
'--sourcepath=%s' % sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--try-toolchain=gompi,2018a',
'--robot=%s' % ecs_path,
'--ignore-osdeps',
'--dry-run',
]
for extra_args in [[], ['--module-naming-scheme=HierarchicalMNS']]:
outtxt = self.eb_main(args + extra_args, verbose=True, raise_error=True)
# toolchain GCC/6.4.0-2.28 (subtoolchain of gompi/2018a) should be listed (and present)
tc_regex = re.compile(r"^ \* \[x\] .*/GCC-6.4.0-2.28.eb \(module: .*GCC/6.4.0-2.28\)$", re.M)
self.assertTrue(tc_regex.search(outtxt), "Pattern %s found in %s" % (tc_regex.pattern, outtxt))
# both toy and gzip dependency should be listed with new toolchains
# in this case we map original toolchain `dummy` to the compiler-only GCC subtoolchain of gompi/2018a
# since this subtoolchain already has sufficient capabilities (we do not map higher than necessary)
for ec_name in ['gzip-1.4', 'toy-0.0']:
ec = '%s-GCC-6.4.0-2.28.eb' % ec_name
if extra_args:
mod = ec_name.replace('-', '/')
else:
mod = '%s-GCC-6.4.0-2.28' % ec_name.replace('-', '/')
mod_regex = re.compile(r"^ \* \[ \] \S+/eb-\S+/%s \(module: .*%s\)$" % (ec, mod), re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
# recursive try also when --(try-)software(-X) is involved
for extra_args in [[],
['--module-naming-scheme=HierarchicalMNS']]:
outtxt = self.eb_main(args + extra_args + ['--try-software-version=1.2.3'], verbose=True, raise_error=True)
# toolchain GCC/6.4.0-2.28 (subtoolchain of gompi/2018a) should be listed (and present)
tc_regex = re.compile(r"^ \* \[x\] .*/GCC-6.4.0-2.28.eb \(module: .*GCC/6.4.0-2.28\)$", re.M)
self.assertTrue(tc_regex.search(outtxt), "Pattern %s found in %s" % (tc_regex.pattern, outtxt))
# both toy and gzip dependency should be listed with new toolchains
# in this case we map original toolchain `dummy` to the compiler-only GCC subtoolchain of gompi/2018a
# since this subtoolchain already has sufficient capabilities (we do not map higher than necessary)
for ec_name in ['gzip-1.4', 'toy-1.2.3']:
ec = '%s-GCC-6.4.0-2.28.eb' % ec_name
mod = ec_name.replace('-', '/')
if not extra_args:
mod += '-GCC-6.4.0-2.28'
mod_regex = re.compile(r"^ \* \[ \] \S+/eb-\S+/%s \(module: .*%s\)$" % (ec, mod), re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
# clear fictitious dependency
write_file(tweaked_toy_ec, "dependencies = []\n", append=True)
# no recursive try if --disable-map-toolchains is involved
for extra_args in [['--try-software-version=1.2.3'], ['--software-version=1.2.3']]:
outtxt = self.eb_main(args + ['--disable-map-toolchains'] + extra_args, raise_error=True)
for mod in ['toy/1.2.3-gompi-2018a', 'gompi/2018a', 'GCC/6.4.0-2.28']:
mod_regex = re.compile(r"\(module: %s\)$" % mod, re.M)
self.assertTrue(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
for mod in ['gompi/1.2.3', 'GCC/1.2.3']:
mod_regex = re.compile(r"\(module: %s\)$" % mod, re.M)
self.assertFalse(mod_regex.search(outtxt), "Pattern %s found in %s" % (mod_regex.pattern, outtxt))
def test_cleanup_builddir(self):
"""Test cleaning up of build dir and --disable-cleanup-builddir."""
toy_ec = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
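# build dir for an easyconfig is expected at <buildpath>/<name>/<version>/<toolchain>,
# i.e. 'system-system' for the SYSTEM toolchain used by toy-0.0.eb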
toy_buildpath = os.path.join(self.test_buildpath, 'toy', '0.0', 'system-system')
args = [
toy_ec,
'--force',
]
self.eb_main(args, do_build=True, verbose=True)
# make sure build directory is properly cleaned up after a successful build (default behavior)
self.assertFalse(os.path.exists(toy_buildpath), "Build dir %s removed after successful build" % toy_buildpath)
# make sure --disable-cleanup-builddir works
args.append('--disable-cleanup-builddir')
self.eb_main(args, do_build=True, verbose=True)
self.assertTrue(os.path.exists(toy_buildpath), "Build dir %s is retained when requested" % toy_buildpath)
shutil.rmtree(toy_buildpath)
# make sure build dir stays in case of failed build
args = [
toy_ec,
'--force',
'--try-amend=prebuildopts=nosuchcommand &&',
]
self.eb_main(args, do_build=True)
self.assertTrue(os.path.exists(toy_buildpath), "Build dir %s is retained after failed build" % toy_buildpath)
def test_filter_deps(self):
"""Test use of --filter-deps."""
test_dir = os.path.dirname(os.path.abspath(__file__))
ec_file = os.path.join(test_dir, 'easyconfigs', 'test_ecs', 'f', 'foss', 'foss-2018a.eb')
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
args = [
ec_file,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--robot=%s' % os.path.join(test_dir, 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
# note: using loose regex pattern when we expect no match, strict pattern when we do expect a match
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
# clear log file
write_file(self.logfile, '')
# filter deps (including a non-existing dep, i.e. zlib)
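# --filter-deps takes a comma-separated list of dependency names; names that do not
# occur as a dependency at all (zlib here) are simply ignored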
args.extend(['--filter-deps', 'FFTW,ScaLAPACK,zlib'])
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertFalse(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
# filter specific version of deps
args[-1] = 'FFTW=3.2.3,zlib,ScaLAPACK=2.0.2'
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertFalse(re.search('module: ScaLAPACK', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
args[-1] = 'zlib,FFTW=3.3.7,ScaLAPACK=2.0.1'
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
# filter deps with version range: only filter FFTW 3.x, ScaLAPACK 1.x
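# version ranges use interval notation: inward-facing brackets ('[1.0:2.0]') include the bounds,
# outward-facing brackets (']1.0:2.0[') exclude them; one side may be left open ('[1.0:' or ':4.0[')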
args[-1] = 'zlib,ScaLAPACK=]1.0:2.0[,FFTW=[3.0:4.0['
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
# also test open ended ranges
args[-1] = 'zlib,ScaLAPACK=[1.0:,FFTW=:4.0['
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW', outtxt))
self.assertFalse(re.search('module: ScaLAPACK', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
args[-1] = 'zlib,ScaLAPACK=[2.1:,FFTW=:3.0['
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
# test corner cases where version to filter in equal to low/high range limit
args[-1] = 'FFTW=[3.3.7:4.0],zlib,ScaLAPACK=[1.0:2.0.2]'
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW', outtxt))
self.assertFalse(re.search('module: ScaLAPACK', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
# FFTW & ScaLAPACK versions are not included in range, so no filtering
args[-1] = 'FFTW=]3.3.7:4.0],zlib,ScaLAPACK=[1.0:2.0.2['
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
# also test mix of ranges & specific versions
args[-1] = 'FFTW=3.3.7,zlib,ScaLAPACK=[1.0:2.0.2['
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertFalse(re.search('module: FFTW', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
write_file(self.logfile, '')
args[-1] = 'FFTW=]3.3.7:4.0],zlib,ScaLAPACK=2.0.2'
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertFalse(re.search('module: ScaLAPACK', outtxt))
self.assertFalse(re.search('module: zlib', outtxt))
# This easyconfig contains a dependency of CMake for which no easyconfig exists. It should still
# succeed when called with --filter-deps=CMake=:2.8.10]
write_file(self.logfile, '')
ec_file = os.path.join(test_dir, 'easyconfigs', 'test_ecs', 'f', 'foss', 'foss-2018a-broken.eb')
args[0] = ec_file
args[-1] = 'FFTW=3.3.7,CMake=:2.8.10],zlib'
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
# dictionaries can be printed in any order
regexp = "filtered out dependency.*('name': 'CMake'.*'version': '2.8.10'|'version': '2.8.10'.*'name': 'CMake')"
self.assertTrue(re.search(regexp, outtxt))
# The test below fails without PR 2983
write_file(self.logfile, '')
ec_file = os.path.join(test_dir, 'easyconfigs', 'test_ecs', 'f', 'foss', 'foss-2018a-broken.eb')
args[0] = ec_file
args[-1] = 'FFTW=3.3.7,CMake=:2.8.10],zlib'
outtxt = self.eb_main(args + ['--minimal-toolchains'], do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search(regexp, outtxt))
def test_hide_deps(self):
"""Test use of --hide-deps."""
test_dir = os.path.dirname(os.path.abspath(__file__))
ec_file = os.path.join(test_dir, 'easyconfigs', 'test_ecs', 'f', 'foss', 'foss-2018a.eb')
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
args = [
ec_file,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--robot=%s' % os.path.join(test_dir, 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: GCC/6.4.0-2.28', outtxt))
self.assertTrue(re.search('module: OpenMPI/2.1.2-GCC-6.4.0-2.28', outtxt))
self.assertTrue(re.search('module: OpenBLAS/0.2.20-GCC-6.4.0-2.28', outtxt))
self.assertTrue(re.search('module: FFTW/3.3.7-gompi', outtxt))
self.assertTrue(re.search('module: ScaLAPACK/2.0.2-gompi', outtxt))
# zlib is not a dep at all
self.assertFalse(re.search('module: zlib', outtxt))
# clear log file
write_file(self.logfile, '')
# hide deps (including a non-existing dep, i.e. zlib)
args.append('--hide-deps=FFTW,ScaLAPACK,zlib')
outtxt = self.eb_main(args, do_build=True, verbose=True, raise_error=True)
self.assertTrue(re.search('module: GCC/6.4.0-2.28', outtxt))
self.assertTrue(re.search('module: OpenMPI/2.1.2-GCC-6.4.0-2.28', outtxt))
self.assertTrue(re.search('module: OpenBLAS/0.2.20-GCC-6.4.0-2.28', outtxt))
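# hidden modules get a leading dot in their version (e.g. FFTW/.3.3.7-...), so the regular
# module name must no longer appear while the hidden variant must be present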
self.assertFalse(re.search(r'module: FFTW/3\.3\.7-gompi', outtxt))
self.assertTrue(re.search(r'module: FFTW/\.3\.3\.7-gompi', outtxt))
self.assertFalse(re.search(r'module: ScaLAPACK/2\.0\.2-gompi', outtxt))
self.assertTrue(re.search(r'module: ScaLAPACK/\.2\.0\.2-gompi', outtxt))
# zlib is not a dep at all
self.assertFalse(re.search(r'module: zlib', outtxt))
def test_hide_toolchains(self):
"""Test use of --hide-toolchains."""
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
ec_file = os.path.join(test_ecs_dir, 'g', 'gzip', 'gzip-1.6-GCC-4.9.2.eb')
args = [
ec_file,
'--dry-run',
'--hide-toolchains=GCC',
]
outtxt = self.eb_main(args)
self.assertTrue(re.search(r'module: GCC/\.4\.9\.2', outtxt))
self.assertTrue(re.search(r'module: gzip/1\.6-GCC-4\.9\.2', outtxt))
def test_parse_http_header_fields_urlpat(self):
"""Test function parse_http_header_fields_urlpat"""
urlex = "example.com"
urlgnu = "gnu.org"
hdrauth = "Authorization"
valauth = "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
hdragent = "User-Agent"
valagent = "James/0.0.7 (MI6)"
hdrrefer = "Referer"
valrefer = "http://www.example.com/"
filesub1 = os.path.join(self.test_prefix, "testhttpheaders1.txt")
filesub2 = os.path.join(self.test_prefix, "testhttpheaders2.txt")
filesub3 = os.path.join(self.test_prefix, "testhttpheaders3.txt")
filesub4 = os.path.join(self.test_prefix, "testhttpheaders4.txt")
fileauth = os.path.join(self.test_prefix, "testhttpheadersauth.txt")
write_file(filesub4, filesub3)
write_file(filesub3, filesub2)
write_file(filesub2, filesub1)
write_file(filesub1, "%s::%s:%s\n" % (urlgnu, hdrauth, valauth))
write_file(filesub2, "%s::%s\n" % (urlex, filesub1))
write_file(filesub3, "%s::%s:%s\n" % (urlex, hdragent, filesub2))
write_file(fileauth, "%s\n" % (valauth))
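# filesub4 -> filesub3 -> filesub2 -> filesub1 form a chain of file references:
# parsing filesub3 still resolves to the real Authorization header (case D),
# while filesub4 adds one more level and should trip the recursion limit (case H)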
# Case A: basic pattern
args = "%s::%s:%s" % (urlgnu, hdragent, valagent)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlgnu: ["%s:%s" % (hdragent, valagent)]}, urlpat_headers)
# Case B: urlpat has another urlpat: retain deepest level
args = "%s::%s::%s::%s:%s" % (urlgnu, urlgnu, urlex, hdragent, valagent)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlex: ["%s:%s" % (hdragent, valagent)]}, urlpat_headers)
# Case C: header value has a colon
args = "%s::%s:%s" % (urlex, hdrrefer, valrefer)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlex: ["%s:%s" % (hdrrefer, valrefer)]}, urlpat_headers)
# Case D: recurse into files
args = filesub3
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlgnu: ["%s:%s" % (hdrauth, valauth)]}, urlpat_headers)
# Case E: recurse into files as header
args = "%s::%s" % (urlex, filesub3)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlgnu: ["%s:%s" % (hdrauth, valauth)]}, urlpat_headers)
# Case F: recurse into files as value (header is replaced)
args = "%s::%s:%s" % (urlex, hdrrefer, filesub3)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlgnu: ["%s:%s" % (hdrauth, valauth)]}, urlpat_headers)
# Case G: recurse into files as value (header is retained)
args = "%s::%s:%s" % (urlgnu, hdrauth, fileauth)
urlpat_headers = parse_http_header_fields_urlpat(args)
self.assertEqual({urlgnu: ["%s:%s" % (hdrauth, valauth)]}, urlpat_headers)
# Case H: recurse into files but hit limit
args = filesub4
error_regex = r"Failed to parse_http_header_fields_urlpat \(recursion limit\)"
self.assertErrorRegex(EasyBuildError, error_regex, parse_http_header_fields_urlpat, args)
# Case I: argument is not a string
args = list("foobar")
error_regex = r"Failed to parse_http_header_fields_urlpat \(argument not a string\)"
self.assertErrorRegex(EasyBuildError, error_regex, parse_http_header_fields_urlpat, args)
def test_http_header_fields_urlpat(self):
"""Test use of --http-header-fields-urlpat."""
tmpdir = tempfile.mkdtemp()
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
ec_file = os.path.join(test_ecs_dir, 'g', 'gzip', 'gzip-1.6-GCC-4.9.2.eb')
common_args = [
ec_file,
'--stop=fetch',
'--debug',
'--force',
'--force-download',
'--logtostdout',
'--sourcepath=%s' % tmpdir,
]
# define header fields:values that should (not) show up in the logs, either
# because they are secret or because they are not matched for the url
testdohdr = 'HeaderAPPLIED'
testdoval = 'SECRETvalue'
testdonthdr = 'HeaderIGNORED'
testdontval = 'BOGUSvalue'
# header fields (or their values) may be files to be read instead of literals
testcmdfile = os.path.join(self.test_prefix, 'testhttpheaderscmdline.txt')
testincfile = os.path.join(self.test_prefix, 'testhttpheadersvalinc.txt')
testexcfile = os.path.join(self.test_prefix, 'testhttpheadersvalexc.txt')
testinchdrfile = os.path.join(self.test_prefix, 'testhttpheadershdrinc.txt')
testexchdrfile = os.path.join(self.test_prefix, 'testhttpheadershdrexc.txt')
testurlpatfile = os.path.join(self.test_prefix, 'testhttpheadersurlpat.txt')
# log message formats used when a header field or a file is included
mentionhdr = 'Custom HTTP header field set: %s'
mentionfile = 'File included in parse_http_header_fields_urlpat: %s'
def run_and_assert(args, msg, words_expected=None, words_unexpected=None):
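"""Run 'eb' with the given arguments (mocked, non-testing mode) and check which patterns do/don't occur in the captured stdout."""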
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
if words_expected is not None:
for thestring in words_expected:
self.assertTrue(re.compile(thestring).search(stdout), "Pattern '%s' missing from log (%s)" %
(thestring, msg))
if words_unexpected is not None:
for thestring in words_unexpected:
self.assertFalse(re.compile(thestring).search(stdout), "Pattern '%s' leaked into log (%s)" %
(thestring, msg))
# A: simple direct case (all is logged because passed directly via EasyBuild configuration options)
args = list(common_args)
args.extend([
'--http-header-fields-urlpat=gnu.org::%s:%s' % (testdohdr, testdoval),
'--http-header-fields-urlpat=nomatch.com::%s:%s' % (testdonthdr, testdontval),
])
# expect to find everything passed on cmdline
expected = [mentionhdr % (testdohdr), testdoval, testdonthdr, testdontval]
run_and_assert(args, "case A", expected)
# all subsequent tests share this argument list
args = common_args
args.append('--http-header-fields-urlpat=%s' % (testcmdfile))
# B: simple file case (secrets in file are not logged)
txt = '\n'.join([
'gnu.org::%s: %s' % (testdohdr, testdoval),
'nomatch.com::%s: %s' % (testdonthdr, testdontval),
'',
])
write_file(testcmdfile, txt)
# expect to find only the header key (not its value) and only for the appropriate url
expected = [mentionhdr % testdohdr, mentionfile % testcmdfile]
not_expected = [testdoval, testdonthdr, testdontval]
run_and_assert(args, "case B", expected, not_expected)
# C: recursion one: header value is another file
txt = '\n'.join([
'gnu.org::%s: %s' % (testdohdr, testincfile),
'nomatch.com::%s: %s' % (testdonthdr, testexcfile),
'',
])
write_file(testcmdfile, txt)
write_file(testincfile, '%s\n' % (testdoval))
write_file(testexcfile, '%s\n' % (testdontval))
# expect to find only the header key (not its value and not the filename) and only for the appropriate url
expected = [mentionhdr % (testdohdr), mentionfile % (testcmdfile),
mentionfile % (testincfile), mentionfile % (testexcfile)]
not_expected = [testdoval, testdonthdr, testdontval]
run_and_assert(args, "case C", expected, not_expected)
# D: recursion two: header field + value are read from another file
write_file(testcmdfile, '\n'.join(['gnu.org::%s' % (testinchdrfile), 'nomatch.com::%s' % (testexchdrfile), '']))
write_file(testinchdrfile, '%s: %s\n' % (testdohdr, testdoval))
write_file(testexchdrfile, '%s: %s\n' % (testdonthdr, testdontval))
# expect to find only the header key (and the literal filename) and only for the appropriate url
expected = [mentionhdr % (testdohdr), mentionfile % (testcmdfile),
mentionfile % (testinchdrfile), mentionfile % (testexchdrfile)]
not_expected = [testdoval, testdonthdr, testdontval]
run_and_assert(args, "case D", expected, not_expected)
# E: recursion three: url pattern + header field + value in another file
write_file(testcmdfile, '%s\n' % (testurlpatfile))
txt = '\n'.join([
'gnu.org::%s: %s' % (testdohdr, testdoval),
'nomatch.com::%s: %s' % (testdonthdr, testdontval),
'',
])
write_file(testurlpatfile, txt)
# expect to find only the header key (but not the literal filename) and only for the appropriate url
expected = [mentionhdr % (testdohdr), mentionfile % (testcmdfile), mentionfile % (testurlpatfile)]
not_expected = [testdoval, testdonthdr, testdontval]
run_and_assert(args, "case E", expected, not_expected)
# cleanup downloads
shutil.rmtree(tmpdir)
def test_test_report_env_filter(self):
"""Test use of --test-report-env-filter."""
def toy(extra_args=None):
"""Build & install toy, return contents of test report."""
eb_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
args = [
eb_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
]
if extra_args is not None:
args.extend(extra_args)
self.eb_main(args, do_build=True, raise_error=True, verbose=True)
software_path = os.path.join(self.test_installpath, 'software', 'toy', '0.0')
test_report_path_pattern = os.path.join(software_path, 'easybuild', 'easybuild-toy-0.0*test_report.md')
test_report_txt = read_file(glob.glob(test_report_path_pattern)[0])
return test_report_txt
# define environment variables that should (not) show up in the test report
test_var_secret = 'THIS_IS_JUST_A_SECRET_ENV_VAR_FOR_EASYBUILD'
os.environ[test_var_secret] = 'thisshouldremainsecretonrequest'
test_var_secret_regex = re.compile(test_var_secret)
test_var_public = 'THIS_IS_JUST_A_PUBLIC_ENV_VAR_FOR_EASYBUILD'
os.environ[test_var_public] = 'thisshouldalwaysbeincluded'
test_var_public_regex = re.compile(test_var_public)
# default: no filtering
test_report_txt = toy()
self.assertTrue(test_var_secret_regex.search(test_report_txt))
self.assertTrue(test_var_public_regex.search(test_report_txt))
# filter out env vars that match specified regex pattern
filter_arg = "--test-report-env-filter=.*_SECRET_ENV_VAR_FOR_EASYBUILD"
test_report_txt = toy(extra_args=[filter_arg])
res = test_var_secret_regex.search(test_report_txt)
self.assertFalse(res, "No match for %s in %s" % (test_var_secret_regex.pattern, test_report_txt))
self.assertTrue(test_var_public_regex.search(test_report_txt))
# make sure that used filter is reported correctly in test report
filter_arg_regex = re.compile(r"--test-report-env-filter='.\*_SECRET_ENV_VAR_FOR_EASYBUILD'")
tup = (filter_arg_regex.pattern, test_report_txt)
self.assertTrue(filter_arg_regex.search(test_report_txt), "%s in %s" % tup)
def test_robot(self):
"""Test --robot and --robot-paths command line options."""
# redefine $EASYBUILD_ROBOT_PATHS (set in setUp) to point to a location without easyconfigs
os.environ['EASYBUILD_ROBOT_PATHS'] = self.test_prefix
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
# includes 'toy/.0.0-deps' as a dependency
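# (the leading '.' in the module name marks it as a hidden module, cf. the --hide-deps test above)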
eb_file = os.path.join(test_ecs_path, 'g', 'gzip', 'gzip-1.4-GCC-4.6.3.eb')
# hide test modules
self.reset_modulepath([])
# dependency resolution is disabled by default, even if required paths are available
args = [
eb_file,
'--robot-paths=%s' % test_ecs_path,
]
error_regex = r"Missing modules for dependencies .*: toy/\.0.0-deps"
self.assertErrorRegex(EasyBuildError, error_regex, self.eb_main, args, raise_error=True, do_build=True)
# enable robot, but without passing path required to resolve toy dependency => FAIL
# note that --dry-run is now robust against missing easyconfigs, so we shouldn't use it here
args = [
eb_file,
'--robot',
]
self.assertErrorRegex(EasyBuildError, 'Missing dependencies', self.eb_main, args, raise_error=True)
# add path to test easyconfigs to robot paths, so dependencies can be resolved
args.append('--dry-run')
self.eb_main(args + ['--robot-paths=%s' % test_ecs_path], raise_error=True)
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
copy_dir(test_ecs_path, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
# prepend path to test easyconfigs into Python search path, so it gets picked up as --robot-paths default
del os.environ['EASYBUILD_ROBOT_PATHS']
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir)
self.eb_main(args, raise_error=True)
shutil.rmtree(tmpdir)
sys.path[:] = orig_sys_path
# make sure that paths specified to --robot get preference over --robot-paths
args = [
eb_file,
'--robot=%s' % test_ecs_path,
'--robot-paths=%s' % os.path.join(tmpdir, 'easybuild', 'easyconfigs'),
'--dry-run',
]
outtxt = self.eb_main(args, raise_error=True)
ecfiles = [
'g/GCC/GCC-4.6.3.eb',
'i/intel/intel-2018a.eb',
't/toy/toy-0.0-deps.eb',
'g/gzip/gzip-1.4-GCC-4.6.3.eb',
]
for ecfile in ecfiles:
ec_regex = re.compile(r'^\s\*\s\[[xF ]\]\s%s' % os.path.join(test_ecs_path, ecfile), re.M)
self.assertTrue(ec_regex.search(outtxt), "Pattern %s not found in %s" % (ec_regex.pattern, outtxt))
def test_robot_path_check(self):
"""Test path check for --robot"""
empty_file = os.path.join(self.test_prefix, 'empty')
write_file(empty_file, '')
error_pattern = "Argument passed to --robot is not an existing directory"
for robot in ['--robot=foo', '--robot=%s' % empty_file]:
args = ['toy-0.0.eb', '--dry-run', robot]
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
toy_regex = re.compile('module: toy/0.0')
# works fine if the directory exists
args = ['toy-0.0.eb', '-r', self.test_prefix, '--dry-run']
outtxt = self.eb_main(args, raise_error=True)
self.assertTrue(toy_regex.search(outtxt), "Pattern '%s' not found in: %s" % (toy_regex.pattern, outtxt))
# no error when name of an easyconfig file is specified to --robot (even if it doesn't exist)
args = ['--dry-run', '--robot', 'toy-0.0.eb']
outtxt = self.eb_main(args, raise_error=True)
self.assertTrue(toy_regex.search(outtxt), "Pattern '%s' not found in: %s" % (toy_regex.pattern, outtxt))
# different error when a non-existing easyconfig file is specified to --robot
args = ['--dry-run', '--robot', 'no_such_easyconfig_file_in_robot_search_path.eb']
error_pattern = "One or more files not found: no_such_easyconfig_file_in_robot_search_path.eb"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
for robot in ['-r%s' % self.test_prefix, '--robot=%s' % self.test_prefix]:
args = ['toy-0.0.eb', '--dry-run', robot]
outtxt = self.eb_main(args, raise_error=True)
self.assertTrue(toy_regex.search(outtxt), "Pattern '%s' not found in: %s" % (toy_regex.pattern, outtxt))
# no problem with using combos of single-letter options with -r included, no matter the order
for arg in ['-Dr', '-rD', '-frkD', '-rfDk']:
args = ['toy-0.0.eb', arg]
outtxt = self.eb_main(args, raise_error=True)
self.assertTrue(toy_regex.search(outtxt), "Pattern '%s' not found in: %s" % (toy_regex.pattern, outtxt))
# unknown options are still recognized, even when used in single-letter combo arguments
for arg in ['-DX', '-DrX', '-DXr', '-frkDX', '-XfrD']:
args = ['toy-0.0.eb', arg]
self.mock_stderr(True)
self.assertErrorRegex(SystemExit, '.*', self.eb_main, args, raise_error=True, raise_systemexit=True)
stderr = self.get_stderr()
self.mock_stderr(False)
self.assertTrue("error: no such option: -X" in stderr)
def test_missing_cfgfile(self):
"""Test behaviour when non-existing config file is specified."""
args = ['--configfiles=/no/such/cfgfile.foo']
error_regex = "parseconfigfiles: configfile .* not found"
self.assertErrorRegex(EasyBuildError, error_regex, self.eb_main, args, raise_error=True)
def test_show_default_moduleclasses(self):
"""Test --show-default-moduleclasses."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
args = [
'--unittest-file=%s' % self.logfile,
'--show-default-moduleclasses',
]
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
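# expected output: a header line followed by one '<tab><class>:<spaces><description>' line per default
# module class; parentheses in the descriptions must be escaped to use them in a regex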
lst = ["\t%s:[ ]*%s" % (c, d.replace('(', '\\(').replace(')', '\\)')) for (c, d) in DEFAULT_MODULECLASSES]
regex = re.compile("Default available module classes:\n\n" + '\n'.join(lst), re.M)
self.assertTrue(regex.search(logtxt), "Pattern '%s' not found in %s" % (regex.pattern, logtxt))
def test_show_default_configfiles(self):
"""Test --show-default-configfiles."""
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
home = os.environ['HOME']
for envvar in ['XDG_CONFIG_DIRS', 'XDG_CONFIG_HOME']:
if envvar in os.environ:
del os.environ[envvar]
reload(easybuild.tools.options)
args = [
'--unittest-file=%s' % self.logfile,
'--show-default-configfiles',
]
cfgtxt = '\n'.join([
'[config]',
'prefix = %s' % self.test_prefix,
])
expected_tmpl = '\n'.join([
"Default list of configuration files:",
'',
"[with $XDG_CONFIG_HOME: %s, $XDG_CONFIG_DIRS: %s]",
'',
"* user-level: ${XDG_CONFIG_HOME:-$HOME/.config}/easybuild/config.cfg",
" -> %s",
"* system-level: ${XDG_CONFIG_DIRS:-/etc}/easybuild.d/*.cfg",
" -> %s/easybuild.d/*.cfg => ",
])
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
homecfgfile = os.path.join(os.environ['HOME'], '.config', 'easybuild', 'config.cfg')
homecfgfile_str = homecfgfile
if os.path.exists(homecfgfile):
homecfgfile_str += " => found"
else:
homecfgfile_str += " => not found"
expected = expected_tmpl % ('(not set)', '(not set)', homecfgfile_str, '{/etc}')
self.assertTrue(expected in logtxt)
# to predict the full output, we need to take control over $HOME and $XDG_CONFIG_DIRS
os.environ['HOME'] = self.test_prefix
xdg_config_dirs = os.path.join(self.test_prefix, 'etc')
os.environ['XDG_CONFIG_DIRS'] = xdg_config_dirs
expected_tmpl += '\n'.join([
"%s",
'',
"Default list of existing configuration files (%d): %s",
])
# put dummy cfgfile in place in $HOME (to predict last line of output which only lists *existing* files)
mkdir(os.path.join(self.test_prefix, '.config', 'easybuild'), parents=True)
homecfgfile = os.path.join(self.test_prefix, '.config', 'easybuild', 'config.cfg')
write_file(homecfgfile, cfgtxt)
reload(easybuild.tools.options)
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
expected = expected_tmpl % ('(not set)', xdg_config_dirs, "%s => found" % homecfgfile, '{%s}' % xdg_config_dirs,
'(no matches)', 1, homecfgfile)
self.assertTrue(expected in logtxt)
xdg_config_home = os.path.join(self.test_prefix, 'home')
os.environ['XDG_CONFIG_HOME'] = xdg_config_home
xdg_config_dirs = [os.path.join(self.test_prefix, 'etc'), os.path.join(self.test_prefix, 'moaretc')]
os.environ['XDG_CONFIG_DIRS'] = os.pathsep.join(xdg_config_dirs)
# put various dummy cfgfiles in place
cfgfiles = [
os.path.join(self.test_prefix, 'etc', 'easybuild.d', 'config.cfg'),
os.path.join(self.test_prefix, 'moaretc', 'easybuild.d', 'bar.cfg'),
os.path.join(self.test_prefix, 'moaretc', 'easybuild.d', 'foo.cfg'),
os.path.join(xdg_config_home, 'easybuild', 'config.cfg'),
]
for cfgfile in cfgfiles:
mkdir(os.path.dirname(cfgfile), parents=True)
write_file(cfgfile, cfgtxt)
reload(easybuild.tools.options)
write_file(self.logfile, '')
self.eb_main(args, logfile=dummylogfn, verbose=True)
logtxt = read_file(self.logfile)
expected = expected_tmpl % (xdg_config_home, os.pathsep.join(xdg_config_dirs),
"%s => found" % os.path.join(xdg_config_home, 'easybuild', 'config.cfg'),
'{' + ', '.join(xdg_config_dirs) + '}',
', '.join(cfgfiles[:-1]), 4, ', '.join(cfgfiles))
self.assertTrue(expected in logtxt)
del os.environ['XDG_CONFIG_DIRS']
del os.environ['XDG_CONFIG_HOME']
os.environ['HOME'] = home
reload(easybuild.tools.options)
def test_generate_cmd_line(self):
"""Test for generate_cmd_line."""
self.purge_environment()
def generate_cmd_line(ebopts):
"""Helper function to filter generated command line (to ignore $EASYBUILD_IGNORECONFIGFILES)."""
return [x for x in ebopts.generate_cmd_line() if not x.startswith('--ignoreconfigfiles=')]
ebopts = EasyBuildOptions(envvar_prefix='EASYBUILD')
self.assertEqual(generate_cmd_line(ebopts), [])
ebopts = EasyBuildOptions(go_args=['--force'], envvar_prefix='EASYBUILD')
self.assertEqual(generate_cmd_line(ebopts), ['--force'])
ebopts = EasyBuildOptions(go_args=['--search=bar', '--search', 'foobar'], envvar_prefix='EASYBUILD')
self.assertEqual(generate_cmd_line(ebopts), ["--search='foobar'"])
os.environ['EASYBUILD_DEBUG'] = '1'
ebopts = EasyBuildOptions(go_args=['--force'], envvar_prefix='EASYBUILD')
self.assertEqual(generate_cmd_line(ebopts), ['--debug', '--force'])
args = [
# install path with a single quote in it, iieeeuuuwww
"--installpath=/this/is/a/weird'prefix",
'--test-report-env-filter=(COOKIE|SESSION)',
'--suffix-modules-path=',
'--try-toolchain=foss,2015b',
'--logfile-format=easybuild,eb-%(name)s.log',
# option with spaces with value wrapped in double quotes, oh boy...
'--optarch="O3 -mtune=generic"',
]
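# generate_cmd_line() is expected to return the options sorted alphabetically, with values
# single-quoted and embedded single quotes escaped (cf. the 'expected' list below)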
expected = [
'--debug',
"--installpath='/this/is/a/weird\\'prefix'",
"--logfile-format='easybuild,eb-%(name)s.log'",
"--optarch='O3 -mtune=generic'",
"--suffix-modules-path=''",
"--test-report-env-filter='(COOKIE|SESSION)'",
"--try-toolchain='foss,2015b'",
]
ebopts = EasyBuildOptions(go_args=args, envvar_prefix='EASYBUILD')
self.assertEqual(generate_cmd_line(ebopts), expected)
# must be run after test for --list-easyblocks, hence the '_xxx_'
# cleaning up the imported easyblocks is quite difficult...
def test_xxx_include_easyblocks(self):
"""Test --include-easyblocks."""
orig_local_sys_path = sys.path[:]
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# clear log
write_file(self.logfile, '')
# existing test EB_foo easyblock is found without including a custom one
args = [
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
test_easyblocks = os.path.dirname(os.path.abspath(__file__))
path_pattern = os.path.join(test_easyblocks, 'sandbox', 'easybuild', 'easyblocks', 'f', 'foo.py')
foo_regex = re.compile(r"^\|-- EB_foo \(easybuild.easyblocks.foo @ %s\)" % path_pattern, re.M)
self.assertTrue(foo_regex.search(logtxt), "Pattern '%s' not found in: %s" % (foo_regex.pattern, logtxt))
# 'undo' import of foo easyblock
del sys.modules['easybuild.easyblocks.foo']
sys.path = orig_local_sys_path
import easybuild.easyblocks
reload(easybuild.easyblocks)
import easybuild.easyblocks.generic
reload(easybuild.easyblocks.generic)
# kick out any paths that shouldn't be there for easybuild.easyblocks and easybuild.easyblocks.generic,
# so that easyblocks picked up from other locations don't cause trouble
testdir_sandbox = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox')
for pkg in ('easybuild.easyblocks', 'easybuild.easyblocks.generic'):
for path in sys.modules[pkg].__path__[:]:
if testdir_sandbox not in path:
sys.modules[pkg].__path__.remove(path)
# include extra test easyblocks
# Make them inherit from each other to trigger a known issue with changed imports, see #3779
# Choose naming so that order of naming is different than inheritance order
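# resulting inheritance chain: EB_foo -> EB_zfoo -> EB_afoo -> EasyBlock,
# while the file/module names sort as afoo, foo, zfoo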
afoo_txt = textwrap.dedent("""
from easybuild.framework.easyblock import EasyBlock
class EB_afoo(EasyBlock):
def __init__(self, *args, **kwargs):
super(EB_afoo, self).__init__(*args, **kwargs)
""")
write_file(os.path.join(self.test_prefix, 'afoo.py'), afoo_txt)
foo_txt = textwrap.dedent("""
from easybuild.easyblocks.zfoo import EB_zfoo
class EB_foo(EB_zfoo):
def __init__(self, *args, **kwargs):
super(EB_foo, self).__init__(*args, **kwargs)
""")
write_file(os.path.join(self.test_prefix, 'foo.py'), foo_txt)
zfoo_txt = textwrap.dedent("""
from easybuild.easyblocks.afoo import EB_afoo
class EB_zfoo(EB_afoo):
def __init__(self, *args, **kwargs):
super(EB_zfoo, self).__init__(*args, **kwargs)
""")
write_file(os.path.join(self.test_prefix, 'zfoo.py'), zfoo_txt)
# clear log
write_file(self.logfile, '')
args = [
'--include-easyblocks=%s/*.py' % self.test_prefix,
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
path_pattern = os.path.join(self.test_prefix, '.*', 'included-easyblocks-.*', 'easybuild', 'easyblocks',
'foo.py')
foo_regex = re.compile(r"^\|-- EB_foo \(easybuild.easyblocks.foo @ %s\)" % path_pattern, re.M)
self.assertTrue(foo_regex.search(logtxt), "Pattern '%s' not found in: %s" % (foo_regex.pattern, logtxt))
ec_txt = '\n'.join([
'easyblock = "EB_foo"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = SYSTEM',
])
ec = EasyConfig(path=None, rawtxt=ec_txt)
# easyblock is found via get_easyblock_class
for name in ('EB_afoo', 'EB_foo', 'EB_zfoo'):
klass = get_easyblock_class(name)
self.assertTrue(issubclass(klass, EasyBlock), "%s (%s) is an EasyBlock derivative class" % (klass, name))
eb_inst = klass(ec)
self.assertTrue(eb_inst is not None, "Instantiating the injected class %s works" % name)
# 'undo' import of the easyblocks
for name in ('afoo', 'foo', 'zfoo'):
del sys.modules['easybuild.easyblocks.' + name]
# must be run after test for --list-easyblocks, hence the '_xxx_'
# cleaning up the imported easyblocks is quite difficult...
def test_xxx_include_generic_easyblocks(self):
"""Test --include-easyblocks with a generic easyblock."""
orig_local_sys_path = sys.path[:]
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# clear log
write_file(self.logfile, '')
# generic easyblock FooBar is not there initially
error_msg = "Failed to obtain class for FooBar easyblock"
self.assertErrorRegex(EasyBuildError, error_msg, get_easyblock_class, 'FooBar')
# include extra test easyblocks
txt = '\n'.join([
'from easybuild.framework.easyblock import EasyBlock',
'class FooBar(EasyBlock):',
' pass',
''
])
write_file(os.path.join(self.test_prefix, 'generic', 'foobar.py'), txt)
args = [
'--include-easyblocks=%s/generic/*.py' % self.test_prefix,
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
]
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
path_pattern = os.path.join(self.test_prefix, '.*', 'included-easyblocks-.*', 'easybuild', 'easyblocks',
'generic', 'foobar.py')
foo_regex = re.compile(r"^\|-- FooBar \(easybuild.easyblocks.generic.foobar @ %s\)" % path_pattern, re.M)
self.assertTrue(foo_regex.search(logtxt), "Pattern '%s' not found in: %s" % (foo_regex.pattern, logtxt))
klass = get_easyblock_class('FooBar')
self.assertTrue(issubclass(klass, EasyBlock), "%s is an EasyBlock derivative class" % klass)
# 'undo' import of foobar easyblock
del sys.modules['easybuild.easyblocks.generic.foobar']
os.remove(os.path.join(self.test_prefix, 'generic', 'foobar.py'))
sys.path = orig_local_sys_path
import easybuild.easyblocks
reload(easybuild.easyblocks)
import easybuild.easyblocks.generic
reload(easybuild.easyblocks.generic)
# kick out any paths that shouldn't be there for easybuild.easyblocks and easybuild.easyblocks.generic,
# so that easyblocks picked up from other locations don't cause trouble
testdir_sandbox = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox')
for pkg in ('easybuild.easyblocks', 'easybuild.easyblocks.generic'):
for path in sys.modules[pkg].__path__[:]:
if testdir_sandbox not in path:
sys.modules[pkg].__path__.remove(path)
error_msg = "Failed to obtain class for FooBar easyblock"
self.assertErrorRegex(EasyBuildError, error_msg, get_easyblock_class, 'FooBar')
# clear log
write_file(self.logfile, '')
# importing without specifying 'generic' also works, and generic easyblock can be imported as well
# this works thanks to a fallback mechanism in get_easyblock_class
txt = '\n'.join([
'from easybuild.framework.easyblock import EasyBlock',
'class GenericTest(EasyBlock):',
' pass',
''
])
write_file(os.path.join(self.test_prefix, 'generictest.py'), txt)
args[0] = '--include-easyblocks=%s/*.py' % self.test_prefix
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
mod_pattern = 'easybuild.easyblocks.generic.generictest'
path_pattern = os.path.join(self.test_prefix, '.*', 'included-easyblocks-.*', 'easybuild', 'easyblocks',
'generic', 'generictest.py')
foo_regex = re.compile(r"^\|-- GenericTest \(%s @ %s\)" % (mod_pattern, path_pattern), re.M)
self.assertTrue(foo_regex.search(logtxt), "Pattern '%s' not found in: %s" % (foo_regex.pattern, logtxt))
klass = get_easyblock_class('GenericTest')
self.assertTrue(issubclass(klass, EasyBlock), "%s is an EasyBlock derivative class" % klass)
# 'undo' import of generictest easyblock
del sys.modules['easybuild.easyblocks.generic.generictest']
# must be run after test for --list-easyblocks, hence the '_xxx_'
# cleaning up the imported easyblocks is quite difficult...
def test_github_xxx_include_easyblocks_from_pr(self):
"""Test --include-easyblocks-from-pr."""
if self.github_token is None:
print("Skipping test_preview_pr, no GitHub token available?")
return
orig_local_sys_path = sys.path[:]
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# clear log
write_file(self.logfile, '')
# include extra test easyblock
foo_txt = '\n'.join([
'from easybuild.framework.easyblock import EasyBlock',
'class EB_foo(EasyBlock):',
' pass',
''
])
write_file(os.path.join(self.test_prefix, 'foo.py'), foo_txt)
args = [
'--include-easyblocks=%s/*.py' % self.test_prefix, # this shouldn't interfere
'--include-easyblocks-from-pr=1915', # a PR for CMakeMake easyblock
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
]
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, logfile=dummylogfn, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
logtxt = read_file(self.logfile)
self.assertFalse(stderr)
self.assertEqual(stdout, "== easyblock cmakemake.py included from PR #1915\n")
# easyblock included from pr is found
path_pattern = os.path.join(self.test_prefix, '.*', 'included-easyblocks-.*', 'easybuild', 'easyblocks')
cmm_pattern = os.path.join(path_pattern, 'generic', 'cmakemake.py')
cmm_regex = re.compile(r"\|-- CMakeMake \(easybuild.easyblocks.generic.cmakemake @ %s\)" % cmm_pattern, re.M)
self.assertTrue(cmm_regex.search(logtxt), "Pattern '%s' not found in: %s" % (cmm_regex.pattern, logtxt))
# easyblock is found via get_easyblock_class
klass = get_easyblock_class('CMakeMake')
self.assertTrue(issubclass(klass, EasyBlock), "%s is an EasyBlock derivative class" % klass)
# 'undo' import of easyblocks
del sys.modules['easybuild.easyblocks.foo']
del sys.modules['easybuild.easyblocks.generic.cmakemake']
os.remove(os.path.join(self.test_prefix, 'foo.py'))
sys.path = orig_local_sys_path
# include test cmakemake easyblock
cmm_txt = '\n'.join([
'from easybuild.framework.easyblock import EasyBlock',
'class CMakeMake(EasyBlock):',
' pass',
''
])
write_file(os.path.join(self.test_prefix, 'cmakemake.py'), cmm_txt)
# including the same easyblock twice should work and give priority to the one from the PR
args = [
'--include-easyblocks=%s/*.py' % self.test_prefix,
'--include-easyblocks-from-pr=1915',
'--list-easyblocks=detailed',
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
]
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, logfile=dummylogfn, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
logtxt = read_file(self.logfile)
expected = "WARNING: One or more easyblocks included from multiple locations: "
expected += "cmakemake.py (the one(s) from PR #1915 will be used)"
self.assertEqual(stderr.strip(), expected)
self.assertEqual(stdout, "== easyblock cmakemake.py included from PR #1915\n")
# easyblock included from pr is found
path_pattern = os.path.join(self.test_prefix, '.*', 'included-easyblocks-.*', 'easybuild', 'easyblocks')
cmm_pattern = os.path.join(path_pattern, 'generic', 'cmakemake.py')
cmm_regex = re.compile(r"\|-- CMakeMake \(easybuild.easyblocks.generic.cmakemake @ %s\)" % cmm_pattern, re.M)
self.assertTrue(cmm_regex.search(logtxt), "Pattern '%s' not found in: %s" % (cmm_regex.pattern, logtxt))
# easyblock is found via get_easyblock_class
klass = get_easyblock_class('CMakeMake')
self.assertTrue(issubclass(klass, EasyBlock), "%s is an EasyBlock derivative class" % klass)
# 'undo' import of easyblocks
del sys.modules['easybuild.easyblocks.foo']
del sys.modules['easybuild.easyblocks.generic.cmakemake']
os.remove(os.path.join(self.test_prefix, 'cmakemake.py'))
sys.path = orig_local_sys_path
import easybuild.easyblocks
reload(easybuild.easyblocks)
import easybuild.easyblocks.generic
reload(easybuild.easyblocks.generic)
# kick out any paths that shouldn't be there for easybuild.easyblocks and easybuild.easyblocks.generic,
# so that easyblocks picked up from other locations don't cause trouble
testdir_sandbox = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox')
for pkg in ('easybuild.easyblocks', 'easybuild.easyblocks.generic'):
for path in sys.modules[pkg].__path__[:]:
if testdir_sandbox not in path:
sys.modules[pkg].__path__.remove(path)
# clear log
write_file(self.logfile, '')
args = [
'--from-pr=10487', # PR for CMake easyconfig
'--include-easyblocks-from-pr=1936,2204', # PRs for EB_CMake and Siesta easyblock
'--unittest-file=%s' % self.logfile,
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--extended-dry-run',
]
self.mock_stderr(True)
self.mock_stdout(True)
self.eb_main(args, logfile=dummylogfn, raise_error=True)
stderr, stdout = self.get_stderr(), self.get_stdout()
self.mock_stderr(False)
self.mock_stdout(False)
logtxt = read_file(self.logfile)
self.assertFalse(stderr)
self.assertEqual(stdout, "== easyblock cmake.py included from PR #1936\n" +
"== easyblock siesta.py included from PR #2204\n")
# easyconfig from pr is found
ec_pattern = os.path.join(self.test_prefix, '.*', 'files_pr10487', 'c', 'CMake',
'CMake-3.16.4-GCCcore-9.3.0.eb')
ec_regex = re.compile(r"Parsing easyconfig file %s" % ec_pattern, re.M)
self.assertTrue(ec_regex.search(logtxt), "Pattern '%s' not found in: %s" % (ec_regex.pattern, logtxt))
# easyblock included from pr is found
eb_regex = re.compile(r"Successfully obtained EB_CMake class instance from easybuild.easyblocks.cmake", re.M)
self.assertTrue(eb_regex.search(logtxt), "Pattern '%s' not found in: %s" % (eb_regex.pattern, logtxt))
# easyblock is found via get_easyblock_class
klass = get_easyblock_class('EB_CMake')
self.assertTrue(issubclass(klass, EasyBlock), "%s is an EasyBlock derivative class" % klass)
# 'undo' import of easyblocks
del sys.modules['easybuild.easyblocks.cmake']
def mk_eb_test_cmd(self, args):
"""Construct test command for 'eb' with given options."""
# make sure that location to 'easybuild.main' is included in $PYTHONPATH
pythonpath = os.getenv('PYTHONPATH')
pythonpath = [pythonpath] if pythonpath else []
easybuild_loc = os.path.dirname(os.path.dirname(easybuild.main.__file__))
os.environ['PYTHONPATH'] = ':'.join([easybuild_loc] + pythonpath)
return '; '.join([
"cd %s" % self.test_prefix,
"%s -O -m easybuild.main %s" % (sys.executable, ' '.join(args)),
])
def test_include_module_naming_schemes(self):
"""Test --include-module-naming-schemes."""
# make sure that calling out to 'eb' will work by restoring $PATH & $PYTHONPATH
self.restore_env_path_pythonpath()
topdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# try and make sure 'eb' is available via $PATH if it isn't yet
path = self.env_path
if which('eb') is None:
path = '%s:%s' % (topdir, path)
# try and make sure top-level directory is in $PYTHONPATH if it isn't yet
pythonpath = self.env_pythonpath
_, ec = run_cmd("cd %s; python -c 'import easybuild.framework'" % self.test_prefix, log_ok=False)
if ec > 0:
pythonpath = '%s:%s' % (topdir, pythonpath)
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# clear log
write_file(self.logfile, '')
mns_regex = re.compile(r'^\s*TestIncludedMNS', re.M)
# TestIncludedMNS module naming scheme is not available by default
args = ['--avail-module-naming-schemes']
test_cmd = self.mk_eb_test_cmd(args)
logtxt, _ = run_cmd(test_cmd, simple=False)
self.assertFalse(mns_regex.search(logtxt), "Unexpected pattern '%s' found in: %s" % (mns_regex.pattern, logtxt))
# include extra test MNS
mns_txt = '\n'.join([
'from easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme',
'class TestIncludedMNS(ModuleNamingScheme):',
' pass',
])
write_file(os.path.join(self.test_prefix, 'test_mns.py'), mns_txt)
# clear log
write_file(self.logfile, '')
args.append('--include-module-naming-schemes=%s/*.py' % self.test_prefix)
test_cmd = self.mk_eb_test_cmd(args)
logtxt, _ = run_cmd(test_cmd, simple=False)
self.assertTrue(mns_regex.search(logtxt), "Pattern '%s' *not* found in: %s" % (mns_regex.pattern, logtxt))
def test_use_included_module_naming_scheme(self):
"""Test using an included module naming scheme."""
# try selecting the added module naming scheme
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# include extra test MNS
mns_txt = '\n'.join([
'import os',
'from easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme',
'class AnotherTestIncludedMNS(ModuleNamingScheme):',
' def det_full_module_name(self, ec):',
" return os.path.join(ec['name'], ec['version'])",
])
write_file(os.path.join(self.test_prefix, 'test_mns.py'), mns_txt)
topdir = os.path.abspath(os.path.dirname(__file__))
eb_file = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
args = [
'--unittest-file=%s' % self.logfile,
'--module-naming-scheme=AnotherTestIncludedMNS',
'--force',
eb_file,
]
# selecting a module naming scheme that is not available (yet) results in an error
error_regex = "Selected module naming scheme 'AnotherTestIncludedMNS' is unknown"
self.assertErrorRegex(EasyBuildError, error_regex, self.eb_main, args, logfile=dummylogfn,
raise_error=True, raise_systemexit=True)
args.append('--include-module-naming-schemes=%s/*.py' % self.test_prefix)
self.eb_main(args, logfile=dummylogfn, do_build=True, raise_error=True, raise_systemexit=True, verbose=True)
toy_mod = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0')
if get_module_syntax() == 'Lua':
toy_mod += '.lua'
self.assertTrue(os.path.exists(toy_mod), "Found %s" % toy_mod)
def test_include_toolchains(self):
"""Test --include-toolchains."""
# make sure that calling out to 'eb' will work by restoring $PATH & $PYTHONPATH
self.restore_env_path_pythonpath()
topdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# try and make sure 'eb' is available via $PATH if it isn't yet
path = self.env_path
if which('eb') is None:
path = '%s:%s' % (topdir, path)
# try and make sure top-level directory is in $PYTHONPATH if it isn't yet
pythonpath = self.env_pythonpath
_, ec = run_cmd("cd %s; python -c 'import easybuild.framework'" % self.test_prefix, log_ok=False)
if ec > 0:
pythonpath = '%s:%s' % (topdir, pythonpath)
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
# clear log
write_file(self.logfile, '')
# set processed attribute to false, to trigger rescan in search_toolchain
setattr(easybuild.tools.toolchain, '%s_PROCESSED' % TC_CONST_PREFIX, False)
tc_regex = re.compile(r'^\s*test_included_toolchain: TestIncludedCompiler', re.M)
# TestIncludedCompiler is not available by default
args = ['--list-toolchains']
test_cmd = self.mk_eb_test_cmd(args)
logtxt, _ = run_cmd(test_cmd, simple=False)
self.assertFalse(tc_regex.search(logtxt), "Pattern '%s' unexpectedly found in: %s" % (tc_regex.pattern, logtxt))
# include extra test toolchain
comp_txt = '\n'.join([
'from easybuild.tools.toolchain.compiler import Compiler',
'class TestIncludedCompiler(Compiler):',
" COMPILER_MODULE_NAME = ['TestIncludedCompiler']",
])
mkdir(os.path.join(self.test_prefix, 'compiler'))
write_file(os.path.join(self.test_prefix, 'compiler', 'test_comp.py'), comp_txt)
tc_txt = '\n'.join([
'from easybuild.toolchains.compiler.test_comp import TestIncludedCompiler',
'class TestIncludedToolchain(TestIncludedCompiler):',
" NAME = 'test_included_toolchain'",
])
write_file(os.path.join(self.test_prefix, 'test_tc.py'), tc_txt)
args.append('--include-toolchains=%s/*.py,%s/*/*.py' % (self.test_prefix, self.test_prefix))
test_cmd = self.mk_eb_test_cmd(args)
logtxt, _ = run_cmd(test_cmd, simple=False)
self.assertTrue(tc_regex.search(logtxt), "Pattern '%s' not found in: %s" % (tc_regex.pattern, logtxt))
def test_cleanup_tmpdir(self):
"""Test --cleanup-tmpdir."""
topdir = os.path.dirname(os.path.abspath(__file__))
args = [
os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb'),
'--dry-run',
'--try-software-version=1.0', # so we get a tweaked easyconfig
]
tmpdir = tempfile.gettempdir()
# just making sure this is empty before we get started
self.assertEqual(os.listdir(tmpdir), [])
# force silence (since we're not using testing mode)
self.mock_stdout(True)
# default: cleanup tmpdir & logfile
self.eb_main(args, raise_error=True, testing=False)
self.assertEqual(os.listdir(tmpdir), [])
self.assertFalse(os.path.exists(self.logfile))
# disable cleaning up tmpdir
args.append('--disable-cleanup-tmpdir')
self.eb_main(args, raise_error=True, testing=False)
tmpdir_files = os.listdir(tmpdir)
# tmpdir and logfile are still there \o/
self.assertTrue(len(tmpdir_files) == 1)
self.assertTrue(os.path.exists(self.logfile))
# tweaked easyconfigs is still there \o/
tweaked_dir = os.path.join(tmpdir, tmpdir_files[0], 'tweaked_easyconfigs')
self.assertTrue(os.path.exists(os.path.join(tweaked_dir, 'toy-1.0.eb')))
def test_github_preview_pr(self):
"""Test --preview-pr."""
if self.github_token is None:
print("Skipping test_preview_pr, no GitHub token available?")
return
self.mock_stdout(True)
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
eb_file = os.path.join(test_ecs_path, 'b', 'bzip2', 'bzip2-1.0.6-GCC-4.9.2.eb')
args = [
'--color=never',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--preview-pr',
eb_file,
]
self.eb_main(args, raise_error=True)
txt = self.get_stdout()
self.mock_stdout(False)
regex = re.compile(r"^Comparing bzip2-1.0.6\S* with bzip2-1.0.6")
self.assertTrue(regex.search(txt), "Pattern '%s' not found in: %s" % (regex.pattern, txt))
def test_github_review_pr(self):
"""Test --review-pr."""
if self.github_token is None:
print("Skipping test_review_pr, no GitHub token available?")
return
self.mock_stdout(True)
self.mock_stderr(True)
# PR for gzip 1.10 easyconfig, see https://github.com/easybuilders/easybuild-easyconfigs/pull/9921
args = [
'--color=never',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--review-pr=9921',
]
self.eb_main(args, raise_error=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.mock_stderr(False)
regex = re.compile(r"^Comparing gzip-1.10-\S* with gzip-1.10-")
self.assertTrue(regex.search(txt), "Pattern '%s' not found in: %s" % (regex.pattern, txt))
self.mock_stdout(True)
self.mock_stderr(True)
# closed PR for gzip 1.2.8 easyconfig,
# see https://github.com/easybuilders/easybuild-easyconfigs/pull/5365
args = [
'--color=never',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--review-pr=5365',
]
self.eb_main(args, raise_error=True, testing=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.mock_stderr(False)
self.assertTrue("This PR should be labelled with 'update'" in txt)
def test_set_tmpdir(self):
"""Test set_tmpdir config function."""
self.purge_environment()
def check_tmpdir(tmpdir):
"""Test use of specified path for temporary directory"""
parent = tmpdir
if parent is None:
parent = tempfile.gettempdir()
mytmpdir = set_tmpdir(tmpdir=tmpdir)
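# set_tmpdir is expected to replace characters outside [\w/.-] in the path with 'X',
# so apply the same substitution to the expected parent dir (cf. the 'weird' candidate tmpdirs below)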
parent = re.sub(r'[^\w/.-]', 'X', parent)
for var in ['TMPDIR', 'TEMP', 'TMP']:
self.assertTrue(os.environ[var].startswith(os.path.join(parent, 'eb-')))
self.assertEqual(os.environ[var], mytmpdir)
self.assertTrue(tempfile.gettempdir().startswith(os.path.join(parent, 'eb-')))
tempfile_tmpdir = tempfile.mkdtemp()
self.assertTrue(tempfile_tmpdir.startswith(os.path.join(parent, 'eb-')))
fd, tempfile_tmpfile = tempfile.mkstemp()
self.assertTrue(tempfile_tmpfile.startswith(os.path.join(parent, 'eb-')))
# tmp_logdir follows tmpdir
self.assertEqual(get_build_log_path(), mytmpdir)
# cleanup
os.close(fd)
shutil.rmtree(mytmpdir)
modify_env(os.environ, self.orig_environ)
tempfile.tempdir = None
orig_tmpdir = tempfile.gettempdir()
cand_tmpdirs = [
None,
os.path.join(orig_tmpdir, 'foo'),
os.path.join(orig_tmpdir, '[1234]. bleh'),
os.path.join(orig_tmpdir, '[ab @cd]%/#*'),
]
for tmpdir in cand_tmpdirs:
check_tmpdir(tmpdir)
def test_minimal_toolchains(self):
"""End-to-end test for --minimal-toolchains."""
# create test easyconfig specifically tailored for this test
# include a dependency for which no easyconfig is available with parent toolchains, only with subtoolchain
ec_file = os.path.join(self.test_prefix, 'test_minimal_toolchains.eb')
ectxt = '\n'.join([
"easyblock = 'ConfigureMake'",
"name = 'test'",
"version = '1.2.3'",
"homepage = 'http://example.com'",
"description = 'this is just a test'",
"toolchain = {'name': 'gompi', 'version': '2018a'}",
# hwloc-1.11.8-gompi-2018a.eb is *not* available, but hwloc-1.11.8-GCC-6.4.0-2.28.eb is,
# and GCC/6.4.0-2.28 is a subtoolchain of gompi/2018a
"dependencies = [('hwloc', '1.11.8'), ('SQLite', '3.8.10.2')]",
])
write_file(ec_file, ectxt)
# check requirements for test
init_config([], build_options={'robot_path': os.environ['EASYBUILD_ROBOT_PATHS']})
self.assertFalse(os.path.exists(robot_find_easyconfig('hwloc', '1.11.8-gompi-2018a') or 'nosuchfile'))
self.assertTrue(os.path.exists(robot_find_easyconfig('hwloc', '1.11.8-GCC-6.4.0-2.28')))
self.assertTrue(os.path.exists(robot_find_easyconfig('SQLite', '3.8.10.2-gompi-2018a')))
self.assertTrue(os.path.exists(robot_find_easyconfig('SQLite', '3.8.10.2-GCC-6.4.0-2.28')))
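# with --minimal-toolchains and HierarchicalMNS, both hwloc and SQLite should be resolved with the
# GCC/6.4.0-2.28 subtoolchain, and thus end up under the Compiler/GCC/6.4.0-2.28 module prefix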
args = [
ec_file,
'--minimal-toolchains',
'--module-naming-scheme=HierarchicalMNS',
'--dry-run',
]
self.mock_stdout(True)
self.eb_main(args, do_build=True, raise_error=True, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
comp = 'Compiler/GCC/6.4.0-2.28'
sqlite_regex = re.compile(r"hwloc-1.11.8-GCC-6.4.0-2.28.eb \(module: %s \| hwloc/" % comp, re.M)
sqlite_regex = re.compile(r"SQLite-3.8.10.2-GCC-6.4.0-2.28.eb \(module: %s \| SQLite/" % comp, re.M)
self.assertTrue(sqlite_regex.search(txt), "Pattern '%s' found in: %s" % (sqlite_regex.pattern, txt))
def test_extended_dry_run(self):
"""Test use of --extended-dry-run/-x."""
ec_file = os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
args = [
ec_file,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--debug',
]
# *no* output in testing mode (honor 'silent')
self.mock_stdout(True)
self.eb_main(args + ['--extended-dry-run'], do_build=True, raise_error=True, testing=True)
stdout = self.get_stdout()
self.mock_stdout(False)
self.assertEqual(len(stdout), 0)
msg_regexs = [
re.compile(r"the actual build \& install procedure that will be performed may diverge", re.M),
re.compile(r"^\*\*\* DRY RUN using 'EB_toy' easyblock", re.M),
re.compile(r"^== COMPLETED: Installation ended successfully \(took .* secs?\)", re.M),
re.compile(r"^\(no ignored errors during dry run\)", re.M),
]
ignoring_error_regex = re.compile(r"WARNING: ignoring error", re.M)
ignored_error_regex = re.compile(r"WARNING: One or more errors were ignored, see warnings above", re.M)
for opt in ['--extended-dry-run', '-x']:
# check for expected patterns in output of --extended-dry-run/-x
self.mock_stdout(True)
self.eb_main(args + [opt], do_build=True, raise_error=True, testing=False)
stdout = self.get_stdout()
self.mock_stdout(False)
for msg_regex in msg_regexs:
self.assertTrue(msg_regex.search(stdout), "Pattern '%s' not found in: %s" % (msg_regex.pattern, stdout))
# no ignored errors should occur
for notthere_regex in [ignoring_error_regex, ignored_error_regex]:
msg = "Pattern '%s' NOT found in: %s" % (notthere_regex.pattern, stdout)
self.assertFalse(notthere_regex.search(stdout), msg)
def test_last_log(self):
"""Test --last-log."""
orig_tmpdir = os.environ['TMPDIR']
tmpdir = os.path.join(tempfile.gettempdir(), 'eb-tmpdir1')
current_log_path = os.path.join(tmpdir, 'easybuild-current.log')
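# layout: 'eb-tmpdir1' holds the current log (which find_last_log must skip),
# while an older log is created under 'eb-tmpdir0' later on, to be picked up as the 'last' log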
# $TMPDIR determines the path to the build log, so we need to set it correctly for the test to check what we intend
os.environ['TMPDIR'] = tmpdir
write_file(current_log_path, "this is a log message")
self.assertEqual(find_last_log(current_log_path), None)
os.environ['TMPDIR'] = orig_tmpdir
self.mock_stdout(True)
mkdir(os.path.dirname(current_log_path))
self.eb_main(['--last-log'], logfile=current_log_path, raise_error=True)
txt = self.get_stdout().strip()
self.mock_stdout(False)
self.assertEqual(txt, '(none)')
# run something that fails first, we need a log file to find
last_log_path = os.path.join(tempfile.gettempdir(), 'eb-tmpdir0', 'easybuild-last.log')
mkdir(os.path.dirname(last_log_path))
self.eb_main(['thisisaneasyconfigthatdoesnotexist.eb'], logfile=last_log_path, raise_error=False)
# $TMPDIR determines the path to the build log, so we need to set it correctly for the test to check what we intend
os.environ['TMPDIR'] = tmpdir
write_file(current_log_path, "this is a log message")
last_log = find_last_log(current_log_path)
self.assertTrue(os.path.samefile(last_log, last_log_path), "%s != %s" % (last_log, last_log_path))
os.environ['TMPDIR'] = orig_tmpdir
self.mock_stdout(True)
mkdir(os.path.dirname(current_log_path))
self.eb_main(['--last-log'], logfile=current_log_path, raise_error=True)
txt = self.get_stdout().strip()
self.mock_stdout(False)
self.assertTrue(os.path.samefile(txt, last_log_path), "%s != %s" % (txt, last_log_path))
def test_fixed_installdir_naming_scheme(self):
"""Test use of --fixed-installdir-naming-scheme."""
# by default, the install dir name follows a fixed <name>/<version> scheme, regardless of the module naming scheme used
topdir = os.path.abspath(os.path.dirname(__file__))
eb_file = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
app = EasyBlock(EasyConfig(eb_file))
app.gen_installdir()
self.assertTrue(app.installdir.endswith('software/toy/0.0'))
init_config(args=['--module-naming-scheme=HierarchicalMNS'])
app = EasyBlock(EasyConfig(eb_file))
app.gen_installdir()
self.assertTrue(app.installdir.endswith('software/toy/0.0'))
# when the fixed installdir naming scheme is disabled, the install dir follows the active module naming scheme
build_options = {
'fixed_installdir_naming_scheme': False,
'valid_module_classes': module_classes(),
}
init_config(args=['--module-naming-scheme=HierarchicalMNS'], build_options=build_options)
app = EasyBlock(EasyConfig(eb_file))
app.gen_installdir()
self.assertTrue(app.installdir.endswith('software/Core/toy/0.0'))
def _assert_regexs(self, regexs, txt, assert_true=True):
"""Helper function to assert presence/absence of list of regex patterns in a text"""
for regex in regexs:
regex = re.compile(regex, re.M)
if assert_true:
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
else:
self.assertFalse(regex.search(txt), "Pattern '%s' unexpectedly found in: %s" % (regex.pattern, txt))
def _run_mock_eb(self, args, strip=False, **kwargs):
"""Helper function to mock easybuild runs"""
self.mock_stdout(True)
self.mock_stderr(True)
self.eb_main(args, **kwargs)
stdout_txt = self.get_stdout()
stderr_txt = self.get_stderr()
self.mock_stdout(False)
self.mock_stderr(False)
if strip:
stdout_txt = stdout_txt.strip()
stderr_txt = stderr_txt.strip()
return stdout_txt, stderr_txt
def test_new_branch_github(self):
"""Test for --new-branch-github."""
if self.github_token is None:
print("Skipping test_create_branch_github, no GitHub token available?")
return
topdir = os.path.dirname(os.path.abspath(__file__))
# test easyconfigs
test_ecs = os.path.join(topdir, 'easyconfigs', 'test_ecs')
toy_ec = os.path.join(test_ecs, 't', 'toy', 'toy-0.0.eb')
args = [
'--new-branch-github',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_ec,
'-D',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
remote = 'git@github.com:%s/easybuild-easyconfigs.git' % GITHUB_TEST_ACCOUNT
regexs = [
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-easyconfigs.git\.\.\.",
r"^== copying files to .*/easybuild-easyconfigs\.\.\.",
r"^== pushing branch '[0-9]{14}_new_pr_toy00' to remote '.*' \(%s\) \[DRY RUN\]" % remote,
]
self._assert_regexs(regexs, txt)
# test easyblocks
test_ebs = os.path.join(topdir, 'sandbox', 'easybuild', 'easyblocks')
toy_eb = os.path.join(test_ebs, 't', 'toy.py')
args = [
'--new-branch-github',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_eb,
'--pr-title="add easyblock for toy"',
'-D',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
remote = 'git@github.com:%s/easybuild-easyblocks.git' % GITHUB_TEST_ACCOUNT
regexs = [
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-easyblocks.git\.\.\.",
r"^== copying files to .*/easybuild-easyblocks\.\.\.",
r"^== pushing branch '[0-9]{14}_new_pr_toy' to remote '.*' \(%s\) \[DRY RUN\]" % remote,
]
self._assert_regexs(regexs, txt)
# test framework with tweaked copy of test_module_naming_scheme.py
test_mns_py = os.path.join(topdir, 'sandbox', 'easybuild', 'tools', 'module_naming_scheme',
'test_module_naming_scheme.py')
target_dir = os.path.join(self.test_prefix, 'easybuild-framework', 'test', 'framework', 'sandbox',
'easybuild', 'tools', 'module_naming_scheme')
mkdir(target_dir, parents=True)
copy_file(test_mns_py, target_dir)
test_mns_py = os.path.join(target_dir, os.path.basename(test_mns_py))
write_file(test_mns_py, '\n\n', append=True)
args = [
'--new-branch-github',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
test_mns_py,
'--pr-commit-msg="a test"',
'-D',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
remote = 'git@github.com:%s/easybuild-framework.git' % GITHUB_TEST_ACCOUNT
regexs = [
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-framework.git\.\.\.",
r"^== copying files to .*/easybuild-framework\.\.\.",
r"^== pushing branch '[0-9]{14}_new_pr_[A-Za-z]{10}' to remote '.*' \(%s\) \[DRY RUN\]" % remote,
]
self._assert_regexs(regexs, txt)
def test_github_new_pr_from_branch(self):
"""Test --new-pr-from-branch."""
if self.github_token is None:
print("Skipping test_new_pr_from_branch, no GitHub token available?")
return
# see https://github.com/boegel/easybuild-easyconfigs/tree/test_new_pr_from_branch_DO_NOT_REMOVE
# branch created specifically for this test,
# only adds toy-0.0.eb test easyconfig compared to central develop branch
test_branch = 'test_new_pr_from_branch_DO_NOT_REMOVE'
args = [
'--new-pr-from-branch=%s' % test_branch,
'--github-user=%s' % GITHUB_TEST_ACCOUNT, # used to get GitHub token
'--github-org=boegel', # used to determine account to grab branch from
'--pr-descr="an easyconfig for toy"',
'-D',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs = [
r"^== fetching branch '%s' from https://github.com/boegel/easybuild-easyconfigs.git\.\.\." % test_branch,
r"^== syncing 'test_new_pr_from_branch_DO_NOT_REMOVE' with current 'easybuilders/develop' branch\.\.\.",
r"^== pulling latest version of 'develop' branch from easybuilders/easybuild-easyconfigs\.\.\.",
r"^== merging 'develop' branch into PR branch 'test_new_pr_from_branch_DO_NOT_REMOVE'\.\.\.",
r"^== checking out target branch 'easybuilders/develop'\.\.\.",
r"^== determining metadata for pull request based on changed files\.\.\.",
r"^== found 1 changed file\(s\) in 'boegel/test_new_pr_from_branch_DO_NOT_REMOVE' " +
"relative to 'easybuilders/develop':$",
r"^\* 1 new/changed easyconfig file\(s\):\n easybuild/easyconfigs/t/toy/toy-0\.0\.eb",
r"^== checking out PR branch 'boegel/test_new_pr_from_branch_DO_NOT_REMOVE'\.\.\.$",
r"\* target: easybuilders/easybuild-easyconfigs:develop$",
r"^\* from: boegel/easybuild-easyconfigs:test_new_pr_from_branch_DO_NOT_REMOVE$",
r'^\* title: "\{tools\}\[system/system\] toy v0\.0"$',
r'^"an easyconfig for toy"$',
r"^ 1 file changed, 32 insertions\(\+\)$",
r"^\* overview of changes:\n easybuild/easyconfigs/t/toy/toy-0\.0\.eb | 32",
]
self._assert_regexs(regexs, txt)
def test_update_branch_github(self):
"""Test --update-branch-github."""
if self.github_token is None:
print("Skipping test_update_branch_github, no GitHub token available?")
return
topdir = os.path.dirname(os.path.abspath(__file__))
test_ecs = os.path.join(topdir, 'easyconfigs', 'test_ecs')
toy_ec = os.path.join(test_ecs, 't', 'toy', 'toy-0.0.eb')
args = [
'--update-branch-github=develop',
'--github-user=boegel', # used to determine account to grab branch from (no GitHub token needed)
toy_ec,
'--pr-commit-msg="this is just a test"',
'-D',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
full_repo = 'boegel/easybuild-easyconfigs'
regexs = [
r"^== fetching branch 'develop' from https://github.com/%s.git\.\.\." % full_repo,
r"^== copying files to .*/git-working-dir.*/easybuild-easyconfigs...",
r"^== pushing branch 'develop' to remote '.*' \(git@github.com:%s.git\) \[DRY RUN\]" % full_repo,
r"^Overview of changes:\n.*/easyconfigs/t/toy/toy-0.0.eb \| 32",
r"== pushed updated branch 'develop' to boegel/easybuild-easyconfigs \[DRY RUN\]",
]
self._assert_regexs(regexs, txt)
def test_github_new_update_pr(self):
"""Test use of --new-pr (dry run only)."""
if self.github_token is None:
print("Skipping test_new_update_pr, no GitHub token available?")
return
# copy toy test easyconfig
topdir = os.path.dirname(os.path.abspath(__file__))
test_ecs = os.path.join(topdir, 'easyconfigs', 'test_ecs')
toy_ec = os.path.join(self.test_prefix, 'toy.eb')
toy_patch_fn = 'toy-0.0_fix-silly-typo-in-printf-statement.patch'
toy_patch = os.path.join(topdir, 'sandbox', 'sources', 'toy', toy_patch_fn)
# purposely picked one with non-default toolchain/versionsuffix
copy_file(os.path.join(test_ecs, 't', 'toy', 'toy-0.0-gompi-2018a-test.eb'), toy_ec)
# modify file to mock archived easyconfig
toy_ec_txt = read_file(toy_ec)
toy_ec_txt = '\n'.join([
"# Built with EasyBuild version 3.1.2 on 2017-04-25_21-35-15",
toy_ec_txt,
"# Build statistics",
"buildstats = [{",
' "build_time": 8.34,',
' "os_type": "Linux",',
"}]",
])
write_file(toy_ec, toy_ec_txt)
args = [
'--new-pr',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_ec,
'-D',
'--disable-cleanup-tmpdir',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
# determine location of repo clone, can be used to test --git-working-dirs-path (and save time)
dirs = glob.glob(os.path.join(self.test_prefix, 'eb-*', '*', 'git-working-dir*'))
if len(dirs) == 1:
git_working_dir = dirs[0]
else:
self.assertTrue(False, "Failed to find temporary git working dir: %s" % dirs)
remote = 'git@github.com:%s/easybuild-easyconfigs.git' % GITHUB_TEST_ACCOUNT
regexs = [
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-easyconfigs.git...",
r"^== pushing branch '.*' to remote '.*' \(%s\)" % remote,
r"^Opening pull request \[DRY RUN\]",
r"^\* target: easybuilders/easybuild-easyconfigs:develop",
r"^\* from: %s/easybuild-easyconfigs:.*_new_pr_toy00" % GITHUB_TEST_ACCOUNT,
r"^\* title: \"\{tools\}\[gompi/2018a\] toy v0.0\"",
r"\(created using `eb --new-pr`\)", # description
r"^\* overview of changes:",
r".*/toy-0.0-gompi-2018a-test.eb\s*\|",
r"^\s*1 file(s?) changed",
]
self._assert_regexs(regexs, txt)
# add unstaged file to git working dir, to check on later
unstaged_file = os.path.join('easybuild-easyconfigs', 'easybuild', 'easyconfigs', 'test.eb')
write_file(os.path.join(git_working_dir, unstaged_file), 'test123')
# a custom commit message is required when doing more than just adding new easyconfigs (e.g., deleting a file)
args.extend([
'--git-working-dirs-path=%s' % git_working_dir,
':bzip2-1.0.6.eb',
])
error_msg = "A meaningful commit message must be specified via --pr-commit-msg"
self.mock_stdout(True)
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True, testing=False)
self.mock_stdout(False)
# check whether unstaged file in git working dir was copied (it shouldn't)
res = glob.glob(os.path.join(self.test_prefix, 'eb-*', 'eb-*', 'git-working-dir*'))
res = [d for d in res if os.path.basename(d) != os.path.basename(git_working_dir)]
if len(res) == 1:
unstaged_file_full = os.path.join(res[0], unstaged_file)
self.assertFalse(os.path.exists(unstaged_file_full), "%s should not be present in %s" % (unstaged_file, res[0]))
else:
self.assertTrue(False, "Found copy of easybuild-easyconfigs working copy")
# add required commit message, try again
args.append('--pr-commit-msg=just a test')
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
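# tweak expected patterns: the overview should now mention 2 changed files (toy easyconfig added +
# bzip2 easyconfig deleted), and the PR title should be taken from --pr-commit-msg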
regexs[-1] = r"^\s*2 files changed"
regexs.remove(r"^\* title: \"\{tools\}\[gompi/2018a\] toy v0.0\"")
regexs.append(r"^\* title: \"just a test\"")
regexs.append(r".*/bzip2-1.0.6.eb\s*\|")
regexs.append(r".*[0-9]+ deletions\(-\)")
self._assert_regexs(regexs, txt)
GITHUB_TEST_ORG = 'test-organization'
args.extend([
'--git-working-dirs-path=%s' % git_working_dir,
'--pr-branch-name=branch_name_for_new_pr_test',
'--pr-commit-msg="this is a commit message. really!"',
'--pr-descr="moar letters foar teh lettre box"',
'--pr-target-branch=main',
'--github-org=%s' % GITHUB_TEST_ORG,
'--pr-target-account=boegel', # we need to be able to 'clone' from here (via https)
'--pr-title=test-1-2-3',
])
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs = [
r"^== fetching branch 'main' from https://github.com/boegel/easybuild-easyconfigs.git...",
r"^Opening pull request \[DRY RUN\]",
r"^\* target: boegel/easybuild-easyconfigs:main",
r"^\* from: %s/easybuild-easyconfigs:branch_name_for_new_pr_test" % GITHUB_TEST_ORG,
r"\(created using `eb --new-pr`\)", # description
r"moar letters foar teh lettre box", # also description (see --pr-descr)
r"^\* title: \"test-1-2-3\"",
r"^\* overview of changes:",
r".*/toy-0.0-gompi-2018a-test.eb\s*\|",
r".*/bzip2-1.0.6.eb\s*\|",
r"^\s*2 files changed",
r".*[0-9]+ deletions\(-\)",
]
self._assert_regexs(regexs, txt)
# should also work with a patch
args.append(toy_patch)
self.mock_stdout(True)
self.eb_main(args, do_build=True, raise_error=True, testing=False)
txt = self.get_stdout()
self.mock_stdout(False)
regexs[-2] = r"^\s*3 files changed"
regexs.append(r".*_fix-silly-typo-in-printf-statement.patch\s*\|")
for regex in regexs:
regex = re.compile(regex, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
# modifying an existing easyconfig requires a custom commit message (the PR title is derived from it)
gcc_ec = os.path.join(test_ecs, 'g', 'GCC', 'GCC-4.9.2.eb')
self.assertTrue(os.path.exists(gcc_ec))
args = [
'--new-pr',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_ec,
gcc_ec,
'-D',
]
error_msg = "A meaningful commit message must be specified via --pr-commit-msg"
self.mock_stdout(True)
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
self.mock_stdout(False)
# also specifying commit message is sufficient; PR title is inherited from commit message
args.append('--pr-commit-msg=this is just a test')
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regex = re.compile(r'^\* title: "this is just a test"', re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' is found in: %s" % (regex.pattern, txt))
args = [
# PR for EasyBuild v2.5.0 release
# we need a PR where the base branch is still available ('develop', in this case)
'--update-pr=2237',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_ec,
'-D',
# only to speed things up
'--git-working-dirs-path=%s' % git_working_dir,
]
error_msg = "A meaningful commit message must be specified via --pr-commit-msg when using --update-pr"
self.mock_stdout(True)
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
self.mock_stdout(False)
args.append('--pr-commit-msg="just a test"')
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs = [
r"^== Determined branch name corresponding to easybuilders/easybuild-easyconfigs PR #2237: develop",
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-easyconfigs.git...",
r".*/toy-0.0-gompi-2018a-test.eb\s*\|",
r"^\s*1 file(s?) changed",
r"^== pushing branch 'develop' to remote '.*' \(git@github.com:easybuilders/easybuild-easyconfigs.git\)",
r"^== pushed updated branch 'develop' to easybuilders/easybuild-easyconfigs \[DRY RUN\]",
r"^== updated https://github.com/easybuilders/easybuild-easyconfigs/pull/2237 \[DRY RUN\]",
]
self._assert_regexs(regexs, txt)
# also check behaviour under --extended-dry-run/-x
args.remove('-D')
args.append('-x')
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs.extend([
r"Full patch:",
r"^\+\+\+\s*.*toy-0.0-gompi-2018a-test.eb",
r"^\+name = 'toy'",
])
self._assert_regexs(regexs, txt)
# check whether comments/buildstats get filtered out
regexs = [
r"# Built with EasyBuild",
r"# Build statistics",
r"buildstats\s*=",
]
self._assert_regexs(regexs, txt, assert_true=False)
def test_github_sync_pr_with_develop(self):
"""Test use of --sync-pr-with-develop (dry run only)."""
if self.github_token is None:
print("Skipping test_sync_pr_with_develop, no GitHub token available?")
return
# use https://github.com/easybuilders/easybuild-easyconfigs/pull/9150,
# which is a PR from boegel:develop to easybuilders:develop
# (to sync 'develop' branch in boegel's fork with central develop branch);
# we need to test with a branch that is guaranteed to stay in place for the test to work,
# since it will actually be downloaded (only the final push to update the branch is skipped under --dry-run)
args = [
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--sync-pr-with-develop=9150',
'--dry-run',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
github_path = r"boegel/easybuild-easyconfigs\.git"
pattern = '\n'.join([
r"== Temporary log file in case of crash .*",
r"== Determined branch name corresponding to easybuilders/easybuild-easyconfigs PR #9150: develop",
r"== fetching branch 'develop' from https://github\.com/%s\.\.\." % github_path,
r"== pulling latest version of 'develop' branch from easybuilders/easybuild-easyconfigs\.\.\.",
r"== merging 'develop' branch into PR branch 'develop'\.\.\.",
r"== pushing branch 'develop' to remote '.*' \(git@github\.com:%s\) \[DRY RUN\]" % github_path,
])
regex = re.compile(pattern)
self.assertTrue(regex.match(txt), "Pattern '%s' doesn't match: %s" % (regex.pattern, txt))
def test_github_sync_branch_with_develop(self):
"""Test use of --sync-branch-with-develop (dry run only)."""
if self.github_token is None:
print("Skipping test_sync_pr_with_develop, no GitHub token available?")
return
# see https://github.com/boegel/easybuild-easyconfigs/tree/test_new_pr_from_branch_DO_NOT_REMOVE
test_branch = 'test_new_pr_from_branch_DO_NOT_REMOVE'
args = [
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--github-org=boegel', # used to determine account to grab branch from
'--sync-branch-with-develop=%s' % test_branch,
'--dry-run',
]
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
self.assertFalse(stderr)
github_path = r"boegel/easybuild-easyconfigs\.git"
pattern = '\n'.join([
r"== Temporary log file in case of crash .*",
r"== fetching branch '%s' from https://github\.com/%s\.\.\." % (test_branch, github_path),
r"== pulling latest version of 'develop' branch from easybuilders/easybuild-easyconfigs\.\.\.",
r"== merging 'develop' branch into PR branch '%s'\.\.\." % test_branch,
r"== pushing branch '%s' to remote '.*' \(git@github\.com:%s\) \[DRY RUN\]" % (test_branch, github_path),
])
regex = re.compile(pattern)
self.assertTrue(regex.match(stdout), "Pattern '%s' doesn't match: %s" % (regex.pattern, stdout))
def test_github_new_pr_python(self):
"""Check generated PR title for --new-pr on easyconfig that includes Python dependency."""
if self.github_token is None:
print("Skipping test_new_pr_python, no GitHub token available?")
return
# copy toy test easyconfig
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
toy_ec = os.path.join(self.test_prefix, 'toy.eb')
copy_file(os.path.join(test_ecs, 't', 'toy', 'toy-0.0.eb'), toy_ec)
# modify file to include Python dependency
toy_ec_txt = read_file(toy_ec)
write_file(toy_ec, toy_ec_txt + "\ndependencies = [('Python', '3.7.2')]")
args = [
'--new-pr',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
toy_ec,
'-D',
'--disable-cleanup-tmpdir',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regex = re.compile(r"^\* title: \"\{tools\}\[system/system\] toy v0.0 w/ Python 3.7.2\"$", re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
# if multiple easyconfigs depending on Python are included, Python version is only listed once
gzip_ec = os.path.join(self.test_prefix, 'test.eb')
copy_file(os.path.join(test_ecs, 'g', 'gzip', 'gzip-1.4.eb'), gzip_ec)
gzip_ec_txt = read_file(gzip_ec)
write_file(gzip_ec, gzip_ec_txt + "\ndependencies = [('Python', '3.7.2')]")
txt, _ = self._run_mock_eb(args + [gzip_ec], do_build=True, raise_error=True, testing=False)
regex = re.compile(r"^\* title: \"\{tools\}\[system/system\] toy v0.0, gzip v1.4 w/ Python 3.7.2\"$", re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
# also check with Python listed via multi_deps
write_file(toy_ec, toy_ec_txt + "\nmulti_deps = {'Python': ['3.7.2', '2.7.15']}")
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regex = re.compile(r"^\* title: \"\{tools\}\[system/system\] toy v0.0 w/ Python 2.7.15 \+ 3.7.2\"$", re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_github_new_pr_delete(self):
"""Test use of --new-pr to delete easyconfigs."""
if self.github_token is None:
print("Skipping test_new_pr_delete, no GitHub token available?")
return
args = [
'--new-pr',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
':bzip2-1.0.6.eb',
'-D',
'--disable-cleanup-tmpdir',
'--pr-title=delete bzip2-1.0.6',
'--pr-commit-msg="delete bzip2-1.0.6.eb"'
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs = [
r"^== fetching branch 'develop' from https://github.com/easybuilders/easybuild-easyconfigs.git...",
r'title: "delete bzip2-1.6.0"',
r"1 file(s?) changed,( 0 insertions\(\+\),)? [0-9]+ deletions\(-\)",
]
self._assert_regexs(regexs, txt)
def test_github_new_pr_dependencies(self):
"""Test use of --new-pr with automatic dependency lookup."""
if self.github_token is None:
print("Skipping test_new_pr_dependencies, no GitHub token available?")
return
foo_eb = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "foo"',
'version = "1.0"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = SYSTEM',
'dependencies = [("bar", "2.0")]'
])
bar_eb = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "bar"',
'version = "2.0"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = SYSTEM',
])
write_file(os.path.join(self.test_prefix, 'foo-1.0.eb'), foo_eb)
write_file(os.path.join(self.test_prefix, 'bar-2.0.eb'), bar_eb)
args = [
'--new-pr',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
os.path.join(self.test_prefix, 'foo-1.0.eb'),
'-D',
'--disable-cleanup-tmpdir',
'-r%s' % self.test_prefix,
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
regexs = [
r"^\* overview of changes:",
r".*/foo-1\.0\.eb\s*\|",
r".*/bar-2\.0\.eb\s*\|",
r"^\s*2 files changed",
]
self._assert_regexs(regexs, txt)
def test_github_merge_pr(self):
"""
Test use of --merge-pr (dry run)"""
if self.github_token is None:
print("Skipping test_merge_pr, no GitHub token available?")
return
# start by making sure --merge-pr without dry-run errors out for a closed PR
args = [
'--merge-pr',
'11753', # closed PR
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
]
error_msg = r"This PR is closed."
self.mock_stdout(True)
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
self.mock_stdout(False)
# and also for an already merged PR
args = [
'--merge-pr',
'11769', # already merged PR
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
]
error_msg = r"This PR is already merged."
self.mock_stdout(True)
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
self.mock_stdout(False)
# merged PR for EasyBuild-3.3.0.eb, is missing approved review
args = [
'--merge-pr',
'4781', # PR for easyconfig for EasyBuild-3.3.0.eb
'-D',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--pr-target-branch=some_branch',
]
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
expected_stdout = '\n'.join([
"Checking eligibility of easybuilders/easybuild-easyconfigs PR #4781 for merging...",
"* test suite passes: OK",
"* last test report is successful: OK",
"* no pending change requests: OK",
"* milestone is set: OK (3.3.1)",
"* mergeable state is clean: PR is already merged",
])
expected_stderr = '\n'.join([
"* targets some_branch branch: FAILED; found 'develop' => not eligible for merging!",
"* approved review: MISSING => not eligible for merging!",
'',
"WARNING: Review indicates this PR should not be merged (use -f/--force to do so anyway)",
])
self.assertEqual(stderr.strip(), expected_stderr)
self.assertTrue(stdout.strip().endswith(expected_stdout), "'%s' ends with '%s'" % (stdout, expected_stdout))
# full eligible merged PR, default target branch
del args[-1]
args[1] = '4832'
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
expected_stdout = '\n'.join([
"Checking eligibility of easybuilders/easybuild-easyconfigs PR #4832 for merging...",
"* targets develop branch: OK",
"* test suite passes: OK",
"* last test report is successful: OK",
"* no pending change requests: OK",
"* approved review: OK (by wpoely86)",
"* milestone is set: OK (3.3.1)",
"* mergeable state is clean: PR is already merged",
'',
"Review OK, merging pull request!",
'',
"[DRY RUN] Adding comment to easybuild-easyconfigs issue #4832: 'Going in, thanks @boegel!'",
"[DRY RUN] Merged easybuilders/easybuild-easyconfigs pull request #4832",
])
expected_stderr = ''
self.assertEqual(stderr.strip(), expected_stderr)
self.assertTrue(stdout.strip().endswith(expected_stdout), "'%s' ends with '%s'" % (stdout, expected_stdout))
# --merge-pr also works on easyblocks (& framework) PRs
args = [
'--merge-pr',
'1206',
'--pr-target-repo=easybuild-easyblocks',
'-D',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
]
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
self.assertEqual(stderr.strip(), '')
expected_stdout = '\n'.join([
"Checking eligibility of easybuilders/easybuild-easyblocks PR #1206 for merging...",
"* targets develop branch: OK",
"* test suite passes: OK",
"* no pending change requests: OK",
"* approved review: OK (by migueldiascosta)",
"* milestone is set: OK (3.3.1)",
"* mergeable state is clean: PR is already merged",
'',
"Review OK, merging pull request!",
])
self.assertTrue(expected_stdout in stdout)
def test_github_empty_pr(self):
"""Test use of --new-pr (dry run only) with no changes"""
if self.github_token is None:
print("Skipping test_empty_pr, no GitHub token available?")
return
# get file from develop branch
full_url = URL_SEPARATOR.join([GITHUB_RAW, GITHUB_EB_MAIN, GITHUB_EASYCONFIGS_REPO,
'develop/easybuild/easyconfigs/z/zlib/zlib-1.2.11-GCCcore-6.4.0.eb'])
ec_fn = os.path.basename(full_url)
ec = download_file(ec_fn, full_url, path=os.path.join(self.test_prefix, ec_fn))
# try to open new pr with unchanged file
args = [
'--new-pr',
ec,
'-D',
'--github-user=%s' % GITHUB_TEST_ACCOUNT,
'--pr-commit-msg=blabla',
]
self.mock_stdout(True)
error_msg = "No changed files found when comparing to current develop branch."
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, do_build=True, raise_error=True)
self.mock_stdout(False)
def test_show_config(self):
""""Test --show-config and --show-full-config."""
# only retain $EASYBUILD_* environment variables we expect for this test
retained_eb_env_vars = [
'EASYBUILD_DEPRECATED',
'EASYBUILD_IGNORECONFIGFILES',
'EASYBUILD_INSTALLPATH',
'EASYBUILD_ROBOT_PATHS',
'EASYBUILD_SOURCEPATH',
]
for key in os.environ.keys():
if key.startswith('EASYBUILD_') and key not in retained_eb_env_vars:
del os.environ[key]
cfgfile = os.path.join(self.test_prefix, 'test.cfg')
cfgtxt = '\n'.join([
"[config]",
"subdir-modules = mods",
])
write_file(cfgfile, cfgtxt)
args = ['--configfiles=%s' % cfgfile, '--show-config', '--buildpath=/weird/build/dir']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
default_prefix = os.path.join(os.environ['HOME'], '.local', 'easybuild')
test_dir = os.path.dirname(os.path.abspath(__file__))
expected_lines = [
r"#",
r"# Current EasyBuild configuration",
r"# \(C: command line argument, D: default value, E: environment variable, F: configuration file\)",
r"#",
r"buildpath\s* \(C\) = /weird/build/dir",
r"configfiles\s* \(C\) = .*" + cfgfile,
r"containerpath\s* \(D\) = %s" % os.path.join(default_prefix, 'containers'),
r"deprecated\s* \(E\) = 10000000",
r"ignoreconfigfiles\s* \(E\) = %s" % ', '.join(os.environ['EASYBUILD_IGNORECONFIGFILES'].split(',')),
r"installpath\s* \(E\) = " + os.path.join(self.test_prefix, 'tmp.*'),
r"repositorypath\s* \(D\) = " + os.path.join(default_prefix, 'ebfiles_repo'),
r"robot-paths\s* \(E\) = " + os.path.join(test_dir, 'easyconfigs', 'test_ecs'),
r"sourcepath\s* \(E\) = " + os.path.join(test_dir, 'sandbox', 'sources'),
r"subdir-modules\s* \(F\) = mods",
]
regex = re.compile('\n'.join(expected_lines))
self.assertTrue(regex.match(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
args = ['--configfiles=%s' % cfgfile, '--show-full-config', '--buildpath=/weird/build/dir']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False)
# output of --show-full-config includes additional lines for options with default values
expected_lines.extend([
r"force\s* \(D\) = False",
r"modules-tool\s* \(D\) = Lmod",
r"module-syntax\s* \(D\) = Lua",
r"umask\s* \(D\) = None",
])
for expected_line in expected_lines:
self.assertTrue(re.search(expected_line, txt, re.M), "Found '%s' in: %s" % (expected_line, txt))
# --show-config should also work if no configuration files are available
# (existing config files are ignored via $EASYBUILD_IGNORECONFIGFILES)
self.assertFalse(os.environ.get('EASYBUILD_CONFIGFILES', False))
args = ['--show-config', '--buildpath=/weird/build/dir']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
self.assertTrue(re.search(r"buildpath\s* \(C\) = /weird/build/dir", txt))
# --show-config should not break including of easyblocks via $EASYBUILD_INCLUDE_EASYBLOCKS (see bug #1696)
txt = '\n'.join([
'from easybuild.framework.easyblock import EasyBlock',
'class EB_testeasyblocktoinclude(EasyBlock):',
' pass',
''
])
testeasyblocktoinclude = os.path.join(self.test_prefix, 'testeasyblocktoinclude.py')
write_file(testeasyblocktoinclude, txt)
os.environ['EASYBUILD_INCLUDE_EASYBLOCKS'] = testeasyblocktoinclude
args = ['--show-config']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
regex = re.compile(r'^include-easyblocks \(E\) = .*/testeasyblocktoinclude.py$', re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_show_config_cfg_levels(self):
"""Test --show-config in relation to how configuring across multiple configuration levels interacts with it."""
# make sure default module syntax is used
if 'EASYBUILD_MODULE_SYNTAX' in os.environ:
del os.environ['EASYBUILD_MODULE_SYNTAX']
# configuring --modules-tool and --module-syntax on different levels should NOT cause problems
# cfr. bug report https://github.com/easybuilders/easybuild-framework/issues/2564
os.environ['EASYBUILD_MODULES_TOOL'] = 'EnvironmentModulesC'
args = [
'--module-syntax=Tcl',
'--show-config',
]
# pass redo_init_config=False to avoid that eb_main (called by _run_mock_eb) re-initialises the configuration,
# which would fail because $EASYBUILD_MODULES_TOOL=EnvironmentModulesC conflicts with the default module syntax (Lua)
stdout, _ = self._run_mock_eb(args, raise_error=True, redo_init_config=False)
patterns = [
r"^# Current EasyBuild configuration",
r"^module-syntax\s*\(C\) = Tcl",
r"^modules-tool\s*\(E\) = EnvironmentModulesC",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
def test_modules_tool_vs_syntax_check(self):
"""Verify that check for modules tool vs syntax works."""
# make sure default module syntax is used
if 'EASYBUILD_MODULE_SYNTAX' in os.environ:
del os.environ['EASYBUILD_MODULE_SYNTAX']
# using EnvironmentModulesC modules tool with default module syntax (Lua) is a problem
os.environ['EASYBUILD_MODULES_TOOL'] = 'EnvironmentModulesC'
args = ['--show-full-config']
error_pattern = "Generating Lua module files requires Lmod as modules tool"
self.assertErrorRegex(EasyBuildError, error_pattern, self._run_mock_eb, args, raise_error=True)
patterns = [
r"^# Current EasyBuild configuration",
r"^module-syntax\s*\(C\) = Tcl",
r"^modules-tool\s*\(E\) = EnvironmentModulesC",
]
# EnvironmentModulesC modules tool + Tcl module syntax is fine
args.append('--module-syntax=Tcl')
stdout, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, redo_init_config=False)
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
# default modules tool (Lmod) with Tcl module syntax is also fine
del os.environ['EASYBUILD_MODULES_TOOL']
patterns[-1] = r"^modules-tool\s*\(D\) = Lmod"
stdout, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, redo_init_config=False)
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
def test_prefix_option(self):
"""Test which configuration settings are affected by --prefix."""
txt, _ = self._run_mock_eb(['--show-full-config', '--prefix=%s' % self.test_prefix], raise_error=True)
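# collect the names of all configuration options whose value refers to the --prefix location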
regex = re.compile(r"(?P<cfg_opt>\S*).*%s.*" % self.test_prefix, re.M)
expected = ['buildpath', 'containerpath', 'installpath', 'packagepath', 'prefix', 'repositorypath']
self.assertEqual(sorted(regex.findall(txt)), expected)
def test_dump_env_config(self):
"""Test for --dump-env-config."""
fftw = 'FFTW-3.3.7-gompic-2018a'
gcc = 'GCC-4.9.2'
openmpi = 'OpenMPI-2.1.2-GCC-4.6.4'
args = ['%s.eb' % ec for ec in [fftw, gcc, openmpi]] + ['--dump-env-script']
os.chdir(self.test_prefix)
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
for name in [fftw, gcc, openmpi]:
# check stdout
regex = re.compile("^Script to set up build environment for %s.eb dumped to %s.env" % (name, name), re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
# check whether scripts were dumped
env_script = os.path.join(self.test_prefix, '%s.env' % name)
self.assertTrue(os.path.exists(env_script))
# existing .env files are not overwritten, unless forced
os.chdir(self.test_prefix)
args = ['%s.eb' % openmpi, '--dump-env-script']
error_msg = r"Script\(s\) already exists, not overwriting them \(unless --force is used\): %s.env" % openmpi
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, do_build=True, raise_error=True)
os.chdir(self.test_prefix)
args.append('--force')
self._run_mock_eb(args, do_build=True, raise_error=True)
# check contents of script
env_script = os.path.join(self.test_prefix, '%s.env' % openmpi)
txt = read_file(env_script)
patterns = [
"module load GCC/4.6.4", # loading of toolchain module
"module load hwloc/1.11.8-GCC-4.6.4", # loading of dependency module
# defining build env
"export FC='gfortran'",
"export CFLAGS='-O2 -ftree-vectorize -march=native -fno-math-errno'",
]
for pattern in patterns:
regex = re.compile("^%s$" % pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
out, ec = run_cmd("function module { echo $@; } && source %s && echo FC: $FC" % env_script, simple=False)
expected_out = '\n'.join([
"load GCC/4.6.4",
"load hwloc/1.11.8-GCC-4.6.4",
"FC: gfortran",
])
self.assertEqual(out.strip(), expected_out)
def test_stop(self):
"""Test use of --stop."""
args = ['toy-0.0.eb', '--force', '--stop=configure']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
regex = re.compile(r"COMPLETED: Installation STOPPED successfully \(took .* secs?\)", re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_fetch(self):
options = EasyBuildOptions(go_args=['--fetch'])
self.assertTrue(options.options.fetch)
self.assertEqual(options.options.stop, 'fetch')
self.assertEqual(options.options.modules_tool, None)
self.assertTrue(options.options.ignore_locks)
self.assertTrue(options.options.ignore_osdeps)
# in this test we want to fake the case where no modules tool is available on the system, so tweak it
self.modtool = None
# create lock dir to see whether --fetch trips over it (it shouldn't)
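# the lock file name is the full software install path with '/' replaced by '_' (plus '.lock'),
# placed under <installpath>/software/.locks/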
lock_fn = os.path.join(self.test_installpath, 'software', 'toy', '0.0').replace('/', '_') + '.lock'
lock_path = os.path.join(self.test_installpath, 'software', '.locks', lock_fn)
mkdir(lock_path, parents=True)
args = ['toy-0.0.eb', '--fetch']
stdout, stderr = self._run_mock_eb(args, raise_error=True, strip=True, testing=False)
patterns = [
r"^== fetching files\.\.\.$",
r"^== COMPLETED: Installation STOPPED successfully \(took .* secs?\)$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' not found in: %s" % (regex.pattern, stdout))
regex = re.compile(r"^== creating build dir, resetting environment\.\.\.$")
self.assertFalse(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
def test_parse_external_modules_metadata(self):
"""Test parse_external_modules_metadata function."""
# by default, provided external module metadata cfg files are picked up
metadata = parse_external_modules_metadata(None)
# just a selection
for mod in ['cray-libsci/13.2.0', 'cray-netcdf/4.3.2', 'fftw/3.3.4.3']:
self.assertTrue(mod in metadata)
netcdf = {
'name': ['netCDF', 'netCDF-Fortran'],
'version': ['4.3.2', '4.3.2'],
'prefix': 'NETCDF_DIR',
}
self.assertEqual(metadata['cray-netcdf/4.3.2'], netcdf)
libsci = {
'name': ['LibSci'],
'version': ['13.2.0'],
'prefix': 'CRAY_LIBSCI_PREFIX_DIR',
}
self.assertEqual(metadata['cray-libsci/13.2.0'], libsci)
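# external modules metadata files use an INI-style format, one section per module;
# illustrative sketch (not the literal contents of any shipped metadata file):
#   [cray-netcdf/4.3.2]
#   name = netCDF,netCDF-Fortran
#   version = 4.3.2,4.3.2
#   prefix = NETCDF_DIR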
testcfgtxt = EXTERNAL_MODULES_METADATA
testcfg = os.path.join(self.test_prefix, 'test_external_modules_metadata.cfg')
write_file(testcfg, testcfgtxt)
metadata = parse_external_modules_metadata([testcfg])
# default metadata is overruled, and not available anymore
for mod in ['cray-libsci/13.2.0', 'cray-netcdf/4.3.2', 'fftw/3.3.4.3']:
self.assertFalse(mod in metadata)
foobar1 = {
'name': ['foo', 'bar'],
'version': ['1.2.3', '3.2.1'],
'prefix': 'FOOBAR_DIR',
}
self.assertEqual(metadata['foobar/1.2.3'], foobar1)
foobar2 = {
'name': ['foobar'],
'version': ['2.0'],
'prefix': 'FOOBAR_PREFIX',
}
self.assertEqual(metadata['foobar/2.0'], foobar2)
# impartial metadata is fine
self.assertEqual(metadata['foo'], {'name': ['Foo'], 'prefix': '/foo'})
self.assertEqual(metadata['bar/1.2.3'], {'name': ['bar'], 'version': ['1.2.3']})
# if both names and versions are specified, lists must have same lengths
write_file(testcfg, '\n'.join(['[foo/1.2.3]', 'name = foo,bar', 'version = 1.2.3']))
err_msg = "Different length for lists of names/versions in metadata for external module"
self.assertErrorRegex(EasyBuildError, err_msg, parse_external_modules_metadata, [testcfg])
# if path to non-existing file is used, an error is reported
doesnotexist = os.path.join(self.test_prefix, 'doesnotexist')
error_pattern = "Specified path for file with external modules metadata does not exist"
self.assertErrorRegex(EasyBuildError, error_pattern, parse_external_modules_metadata, [doesnotexist])
# glob pattern can be used to specify file locations to parse_external_modules_metadata
cfg1 = os.path.join(self.test_prefix, 'cfg_one.ini')
write_file(cfg1, '\n'.join(['[one/1.0]', 'name = one', 'version = 1.0']))
cfg2 = os.path.join(self.test_prefix, 'cfg_two.ini')
write_file(cfg2, '\n'.join([
'[two/2.0]', 'name = two', 'version = 2.0',
'[two/2.1]', 'name = two', 'version = 2.1',
]))
cfg3 = os.path.join(self.test_prefix, 'cfg3.ini')
write_file(cfg3, '\n'.join(['[three/3.0]', 'name = three', 'version = 3.0']))
cfg4 = os.path.join(self.test_prefix, 'cfg_more.ini')
write_file(cfg4, '\n'.join(['[one/1.2.3]', 'name = one', 'version = 1.2.3', 'prefix = /one/1.2.3/']))
metadata = parse_external_modules_metadata([os.path.join(self.test_prefix, 'cfg*.ini')])
self.assertEqual(sorted(metadata.keys()), ['one/1.0', 'one/1.2.3', 'three/3.0', 'two/2.0', 'two/2.1'])
self.assertEqual(metadata['one/1.0'], {'name': ['one'], 'version': ['1.0']})
self.assertEqual(metadata['one/1.2.3'], {'name': ['one'], 'version': ['1.2.3'], 'prefix': '/one/1.2.3/'})
self.assertEqual(metadata['two/2.0'], {'name': ['two'], 'version': ['2.0']})
self.assertEqual(metadata['two/2.1'], {'name': ['two'], 'version': ['2.1']})
self.assertEqual(metadata['three/3.0'], {'name': ['three'], 'version': ['3.0']})
# check whether entries with unknown keys result in an error
cfg1 = os.path.join(self.test_prefix, 'broken_cfg1.cfg')
write_file(cfg1, "[one/1.0]\nname = one\nversion = 1.0\nfoo = bar")
cfg2 = os.path.join(self.test_prefix, 'cfg2.cfg')
write_file(cfg2, "[two/2.0]\nname = two\nversion = 2.0")
cfg3 = os.path.join(self.test_prefix, 'broken_cfg3.cfg')
write_file(cfg3, "[three/3.0]\nnaem = three\nzzz=zzz\nvresion = 3.0\naaa = aaa")
cfg4 = os.path.join(self.test_prefix, 'broken_cfg4.cfg')
write_file(cfg4, "[four/4]\nprfeix = /software/four/4")
broken_cfgs = [cfg1, cfg2, cfg3, cfg4]
error_pattern = '\n'.join([
r"Found metadata entries with unknown keys:",
r"\* four/4: prfeix",
r"\* one/1.0: foo",
r"\* three/3.0: aaa, naem, vresion, zzz",
])
self.assertErrorRegex(EasyBuildError, error_pattern, parse_external_modules_metadata, broken_cfgs)
def test_zip_logs(self):
"""Test use of --zip-logs"""
toy_eb_install_dir = os.path.join(self.test_installpath, 'software', 'toy', '0.0', 'easybuild')
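# each --zip-logs flavour should leave exactly one log file in the install dir, with a matching
# extension: .log (no zipping), .log.gz (gzip, also what plain --zip-logs uses), or .log.bz2 (bzip2)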
for zip_logs in ['', '--zip-logs', '--zip-logs=gzip', '--zip-logs=bzip2']:
shutil.rmtree(self.test_installpath)
args = ['toy-0.0.eb', '--force', '--debug']
if zip_logs:
args.append(zip_logs)
self.eb_main(args, do_build=True)
logs = glob.glob(os.path.join(toy_eb_install_dir, 'easybuild-toy-0.0*log*'))
self.assertEqual(len(logs), 1, "Found exactly 1 log file in %s: %s" % (toy_eb_install_dir, logs))
zip_logs_arg = zip_logs.split('=')[-1]
if zip_logs == '--zip-logs' or zip_logs_arg == 'gzip':
ext = 'log.gz'
elif zip_logs_arg == 'bzip2':
ext = 'log.bz2'
else:
ext = 'log'
self.assertTrue(logs[0].endswith(ext), "%s has correct '%s' extension for %s" % (logs[0], ext, zip_logs))
def test_debug_lmod(self):
"""Test use of --debug-lmod."""
if isinstance(self.modtool, Lmod):
init_config(build_options={'debug_lmod': True})
out = self.modtool.run_module('avail', return_output=True)
for pattern in [r"^Lmod version", r"^lmod\(--terse -D avail\)\{", "Master:avail"]:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(out), "Pattern '%s' found in: %s" % (regex.pattern, out))
else:
print("Skipping test_debug_lmod, requires Lmod as modules tool")
def test_use_color(self):
"""Test use_color function."""
self.assertTrue(use_color('always'))
self.assertFalse(use_color('never'))
easybuild.tools.options.terminal_supports_colors = lambda _: True
self.assertTrue(use_color('auto'))
easybuild.tools.options.terminal_supports_colors = lambda _: False
self.assertFalse(use_color('auto'))
def test_list_prs(self):
"""Test --list-prs."""
args = ['--list-prs', 'foo']
error_msg = r"must be one of \['open', 'closed', 'all'\]"
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
args = ['--list-prs', 'open,foo']
error_msg = r"must be one of \['created', 'updated', 'popularity', 'long-running'\]"
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
args = ['--list-prs', 'open,created,foo']
error_msg = r"must be one of \['asc', 'desc'\]"
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
args = ['--list-prs', 'open,created,asc,foo']
error_msg = r"must be in the format 'state\[,order\[,direction\]\]"
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, args, raise_error=True)
args = ['--list-prs', 'closed,updated,asc']
txt, _ = self._run_mock_eb(args, testing=False)
expected = "Listing PRs with parameters: direction=asc, per_page=100, sort=updated, state=closed"
self.assertTrue(expected in txt)
def test_list_software(self):
"""Test --list-software and --list-installed-software."""
# copy selected test easyconfigs for testing --list-*software options with;
# full test is a nuisance, because all dependencies must be available and toolchains like intel must have
# all expected components when testing with HierarchicalMNS (which the test easyconfigs don't always have)
topdir = os.path.dirname(os.path.abspath(__file__))
cray_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 'c', 'CrayCCE', 'CrayCCE-5.1.29.eb')
gcc_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 'g', 'GCC', 'GCC-4.6.3.eb')
gzip_ec = os.path.join(topdir, 'easyconfigs', 'v1.0', 'g', 'gzip', 'gzip-1.4-GCC-4.6.3.eb')
gzip_system_ec = os.path.join(topdir, 'easyconfigs', 'v1.0', 'g', 'gzip', 'gzip-1.4.eb')
test_ecs = os.path.join(self.test_prefix, 'test_ecs')
for ec in [cray_ec, gcc_ec, gzip_ec, gzip_system_ec]:
subdirs = os.path.dirname(ec).split(os.path.sep)[-2:]
target_dir = os.path.join(test_ecs, *subdirs)
mkdir(target_dir, parents=True)
copy_file(ec, target_dir)
# add (fake) HPL easyconfig using CrayCCE toolchain
# (required to trigger bug reported in https://github.com/easybuilders/easybuild-framework/issues/3265)
hpl_cray_ec_txt = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "HPL"',
'version = "2.3"',
"homepage = 'http://www.netlib.org/benchmark/hpl/'",
'description = "HPL"',
'toolchain = {"name": "CrayCCE", "version": "5.1.29"}',
])
hpl_cray_ec = os.path.join(self.test_prefix, 'test_ecs', 'h', 'HPL', 'HPL-2.3-CrayCCE-5.1.29.eb')
write_file(hpl_cray_ec, hpl_cray_ec_txt)
# put dummy Core/GCC/4.6.3 in place
modpath = os.path.join(self.test_prefix, 'modules')
write_file(os.path.join(modpath, 'Core', 'GCC', '4.6.3'), '#%Module')
self.modtool.use(modpath)
# test with different module naming scheme active
# (see https://github.com/easybuilders/easybuild-framework/issues/3265)
for mns in ['EasyBuildMNS', 'HierarchicalMNS']:
args = [
'--list-software',
'--robot-paths=%s' % test_ecs,
'--module-naming-scheme=%s' % mns,
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, verbose=True)
patterns = [
r"^.*\s*== Processed 5/5 easyconfigs...",
r"^== Found 4 different software packages",
r"^\* CrayCCE",
r"^\* GCC",
r"^\* gzip",
r"^\* HPL",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
args = [
'--list-software=detailed',
'--output-format=rst',
'--robot-paths=%s' % test_ecs,
'--module-naming-scheme=%s' % mns,
]
txt, _ = self._run_mock_eb(args, testing=False, raise_error=True, verbose=True)
patterns = [
r"^.*\s*== Processed 5/5 easyconfigs...",
r"^== Found 4 different software packages",
r'^\*CrayCCE\*',
r'^``5.1.29``\s+``system``',
r'^\*GCC\*',
r'^``4.6.3``\s+``system``',
r'^\*gzip\*',
r'^``1.4`` ``GCC/4.6.3``, ``system``',
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
args = [
'--list-installed-software',
'--output-format=rst',
'--robot-paths=%s' % test_ecs,
'--module-naming-scheme=%s' % mns,
]
txt, _ = self._run_mock_eb(args, testing=False, raise_error=True, verbose=True)
patterns = [
r"^.*\s*== Processed 5/5 easyconfigs...",
r"^== Found 4 different software packages",
r"^== Retained 1 installed software packages",
r'^\* GCC',
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
self.assertFalse(re.search(r'gzip', txt, re.M))
self.assertFalse(re.search(r'CrayCCE', txt, re.M))
args = [
'--list-installed-software=detailed',
'--robot-paths=%s' % test_ecs,
'--module-naming-scheme=%s' % mns,
]
txt, _ = self._run_mock_eb(args, testing=False, raise_error=True, verbose=True)
patterns = [
r"^.*\s*== Processed 5/5 easyconfigs...",
r"^== Found 4 different software packages",
r"^== Retained 1 installed software packages",
r'^\* GCC',
r'^\s+\* GCC v4.6.3: system',
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
self.assertFalse(re.search(r'gzip', txt, re.M))
self.assertFalse(re.search(r'CrayCCE', txt, re.M))
def test_parse_optarch(self):
"""Test correct parsing of optarch option."""
# Check that it is not parsed if we are submitting a job
options = EasyBuildOptions(go_args=['--job'])
optarch_string = 'Intel:something;GCC:somethingelse'
options.options.optarch = optarch_string
options.postprocess()
self.assertEqual(options.options.optarch, optarch_string)
# Use no arguments for the rest of the tests
options = EasyBuildOptions()
# Check for EasyBuildErrors
error_msg = "The optarch option has an incorrect syntax"
options.options.optarch = 'Intel:something;GCC'
self.assertErrorRegex(EasyBuildError, error_msg, options.postprocess)
options.options.optarch = 'Intel:something;'
self.assertErrorRegex(EasyBuildError, error_msg, options.postprocess)
options.options.optarch = 'Intel:something:somethingelse'
self.assertErrorRegex(EasyBuildError, error_msg, options.postprocess)
error_msg = "The optarch option contains duplicated entries for compiler"
options.options.optarch = 'Intel:something;GCC:somethingelse;Intel:anothersomething'
self.assertErrorRegex(EasyBuildError, error_msg, options.postprocess)
# Check the parsing itself
gcc_generic_flags = "march=x86-64 -mtune=generic"
test_cases = [
('', ''),
('xHost', 'xHost'),
('GENERIC', 'GENERIC'),
('Intel:xHost', {'Intel': 'xHost'}),
('Intel:GENERIC', {'Intel': 'GENERIC'}),
('Intel:xHost;GCC:%s' % gcc_generic_flags, {'Intel': 'xHost', 'GCC': gcc_generic_flags}),
('Intel:;GCC:%s' % gcc_generic_flags, {'Intel': '', 'GCC': gcc_generic_flags}),
]
for optarch_string, optarch_parsed in test_cases:
options.options.optarch = optarch_string
options.postprocess()
self.assertEqual(options.options.optarch, optarch_parsed)
def test_check_contrib_style(self):
"""Test style checks performed by --check-contrib + dedicated --check-style option."""
try:
import pycodestyle # noqa
except ImportError:
try:
import pep8 # noqa
except ImportError:
print("Skipping test_check_contrib_style, since pycodestyle or pep8 is not available")
return
regex = re.compile(r"Running style check on 2 easyconfig\(s\)(.|\n)*>> All style checks PASSed!", re.M)
args = [
'--check-style',
'GCC-4.9.2.eb',
'toy-0.0.eb',
]
stdout, _ = self._run_mock_eb(args, raise_error=True)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
# --check-contrib fails because of missing checksums, but style test passes
args[0] = '--check-contrib'
self.mock_stdout(True)
error_pattern = "One or more contribution checks FAILED"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
stdout = self.get_stdout().strip()
self.mock_stdout(False)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
# copy toy-0.0.eb test easyconfig, fiddle with it to make style check fail
toy = os.path.join(self.test_prefix, 'toy.eb')
copy_file(os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb'), toy)
toytxt = read_file(toy)
# introduce whitespace issues
toytxt = toytxt.replace("name = 'toy'", "name\t='toy' ")
# introduce long line
toytxt = toytxt.replace('description = "Toy C program, 100% toy."', 'description = "%s"' % ('toy ' * 30))
write_file(toy, toytxt)
for check_type in ['contribution', 'style']:
args = [
'--check-%s' % check_type[:7],
toy,
]
self.mock_stdout(True)
error_pattern = "One or more %s checks FAILED!" % check_type
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
stdout = self.get_stdout()
self.mock_stdout(False)
patterns = [
"toy.eb:1:5: E223 tab before operator",
"toy.eb:1:7: E225 missing whitespace around operator",
"toy.eb:1:12: W299 trailing whitespace",
r"toy.eb:5:121: E501 line too long \(136 > 120 characters\)",
]
for pattern in patterns:
self.assertTrue(re.search(pattern, stdout, re.M), "Pattern '%s' found in: %s" % (pattern, stdout))
def test_check_contrib_non_style(self):
"""Test non-style checks performed by --check-contrib."""
if not ('pycodestyle' in sys.modules or 'pep8' in sys.modules):
print("Skipping test_check_contrib_non_style (no pycodestyle or pep8 available)")
return
args = [
'--check-contrib',
'toy-0.0.eb',
]
self.mock_stdout(True)
self.mock_stderr(True)
error_pattern = "One or more contribution checks FAILED"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, raise_error=True)
stdout = self.get_stdout().strip()
stderr = self.get_stderr().strip()
self.mock_stdout(False)
self.mock_stderr(False)
self.assertEqual(stderr, '')
# SHA256 checksum checks fail
patterns = [
r"\[FAIL\] .*/toy-0.0.eb$",
r"^Checksums missing for one or more sources/patches in toy-0.0.eb: "
r"found 1 sources \+ 2 patches vs 1 checksums$",
r"^>> One or more SHA256 checksums checks FAILED!",
]
for pattern in patterns:
self.assertTrue(re.search(pattern, stdout, re.M), "Pattern '%s' found in: %s" % (pattern, stdout))
# --check-contrib passes if None values are used as checksum, but produces warning
toy = os.path.join(self.test_prefix, 'toy.eb')
copy_file(os.path.join(os.path.dirname(__file__), 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb'), toy)
toytxt = read_file(toy)
toytxt = toytxt + '\n'.join([
'checksums = [',
" None, # toy-0.0.tar.gz",
" # toy-0.0_fix-silly-typo-in-printf-statement.patch",
" '81a3accc894592152f81814fbf133d39afad52885ab52c25018722c7bda92487',",
" '4196b56771140d8e2468fb77f0240bc48ddbf5dabafe0713d612df7fafb1e458', # toy-extra.txt",
']\n',
])
write_file(toy, toytxt)
args = ['--check-contrib', toy]
self.mock_stdout(True)
self.mock_stderr(True)
self.eb_main(args, raise_error=True)
stderr = self.get_stderr().strip()
self.mock_stdout(False)
self.mock_stderr(False)
self.assertEqual(stderr, "WARNING: Found 1 None checksum value(s), please make sure this is intended!")
def test_allow_use_as_root(self):
"""Test --allow-use-as-root-and-accept-consequences"""
# pretend we're running as root by monkey patching os.getuid used in main
easybuild.main.os.getuid = lambda: 0
# running as root is disallowed by default
error_msg = "You seem to be running EasyBuild with root privileges which is not wise, so let's end this here"
self.assertErrorRegex(EasyBuildError, error_msg, self.eb_main, ['toy-0.0.eb'], raise_error=True)
# running as root is allowed under --allow-use-as-root, but does result in a warning being printed to stderr
args = ['toy-0.0.eb', '--allow-use-as-root-and-accept-consequences']
_, stderr = self._run_mock_eb(args, raise_error=True, strip=True)
expected = "WARNING: Using EasyBuild as root is NOT recommended, please proceed with care!\n"
expected += "(this is only allowed because EasyBuild was configured with "
expected += "--allow-use-as-root-and-accept-consequences)"
self.assertEqual(stderr, expected)
def test_verify_easyconfig_filenames(self):
"""Test --verify-easyconfig-filename"""
test_easyconfigs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
fd, dummylogfn = tempfile.mkstemp(prefix='easybuild-dummy', suffix='.log')
os.close(fd)
toy_ec = os.path.join(test_easyconfigs_dir, 'test_ecs', 't', 'toy', 'toy-0.0.eb')
test_ec = os.path.join(self.test_prefix, 'test.eb')
copy_file(toy_ec, test_ec)
args = [
test_ec,
'--dry-run', # implies enabling dependency resolution
'--unittest-file=%s' % self.logfile,
]
# filename of provided easyconfig doesn't matter by default
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
self.assertTrue('module: toy/0.0' in logtxt)
write_file(self.logfile, '')
# when --verify-easyconfig-filenames is enabled, EB gets picky about the easyconfig filename
args.append('--verify-easyconfig-filenames')
error_pattern = r"Easyconfig filename 'test.eb' does not match with expected filename 'toy-0.0.eb' \(specs: "
error_pattern += r"name: 'toy'; version: '0.0'; versionsuffix: ''; "
error_pattern += r"toolchain name, version: 'system', 'system'\)"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, logfile=dummylogfn, raise_error=True)
write_file(self.logfile, '')
args[0] = toy_ec
self.eb_main(args, logfile=dummylogfn, raise_error=True)
logtxt = read_file(self.logfile)
self.assertTrue('module: toy/0.0' in logtxt)
def test_set_default_module(self):
"""Test use of --set-default-module"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-deps.eb')
self.eb_main([toy_ec, '--set-default-module'], do_build=True, raise_error=True)
toy_mod_dir = os.path.join(self.test_installpath, 'modules', 'all', 'toy')
toy_mod = os.path.join(toy_mod_dir, '0.0-deps')
if get_module_syntax() == 'Lua':
toy_mod += '.lua'
self.assertTrue(os.path.exists(toy_mod))
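# --set-default-module should mark toy/0.0-deps as the default module:
# via a 'default' symlink for Lua module syntax, or a .version file for Tcl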
if get_module_syntax() == 'Lua':
self.assertTrue(os.path.islink(os.path.join(toy_mod_dir, 'default')))
self.assertEqual(os.readlink(os.path.join(toy_mod_dir, 'default')), '0.0-deps.lua')
elif get_module_syntax() == 'Tcl':
toy_dot_version = os.path.join(toy_mod_dir, '.version')
self.assertTrue(os.path.exists(toy_dot_version))
toy_dot_version_txt = read_file(toy_dot_version)
self.assertTrue("set ModulesVersion 0.0-deps" in toy_dot_version_txt)
else:
self.assertTrue(False, "Uknown module syntax: %s" % get_module_syntax())
# make sure default is also set for moduleclass symlink
toy_mod_symlink_dir = os.path.join(self.test_installpath, 'modules', 'tools', 'toy')
if get_module_syntax() == 'Lua':
self.assertEqual(sorted(os.listdir(toy_mod_symlink_dir)), ['0.0-deps.lua', 'default'])
default_symlink = os.path.join(toy_mod_symlink_dir, 'default')
mod_symlink = os.path.join(toy_mod_symlink_dir, '0.0-deps.lua')
self.assertTrue(os.path.islink(default_symlink))
self.assertTrue(os.path.islink(mod_symlink))
self.assertEqual(os.readlink(default_symlink), '0.0-deps.lua')
modfile_path = os.path.join(toy_mod_dir, '0.0-deps.lua')
self.assertTrue(os.path.samefile(os.readlink(mod_symlink), modfile_path))
elif get_module_syntax() == 'Tcl':
self.assertEqual(sorted(os.listdir(toy_mod_symlink_dir)), ['.version', '0.0-deps'])
version_symlink = os.path.join(toy_mod_symlink_dir, '.version')
mod_symlink = os.path.join(toy_mod_symlink_dir, '0.0-deps')
self.assertTrue(os.path.islink(version_symlink))
self.assertTrue(os.path.islink(mod_symlink))
versionfile_path = os.path.join(toy_mod_dir, '.version')
self.assertEqual(os.readlink(version_symlink), versionfile_path)
modfile_path = os.path.join(toy_mod_dir, '0.0-deps')
self.assertTrue(os.path.samefile(os.readlink(mod_symlink), modfile_path))
else:
self.assertTrue(False, "Uknown module syntax: %s" % get_module_syntax())
def test_set_default_module_robot(self):
"""Test use of --set-default-module --robot."""
# create two test easyconfigs, one depending on the other
# (using dummy Toolchain easyblock included in the tests)
test_ec = os.path.join(self.test_prefix, 'test.eb')
write_file(test_ec, '\n'.join([
"easyblock = 'Toolchain'",
"name = 'test'",
"version = '1.0'",
"homepage = 'https://example.com'",
"description = 'this is just a test'",
"toolchain = SYSTEM",
"dependencies = [('thisisjustatestdep', '3.14')]",
]))
testdep_ec = os.path.join(self.test_prefix, 'thisisjustatestdep-3.14.eb')
write_file(testdep_ec, '\n'.join([
"easyblock = 'Toolchain'",
"name = 'thisisjustatestdep'",
"version = '3.14'",
"homepage = 'https://example.com'",
"description = 'this is just a test'",
"toolchain = SYSTEM",
]))
args = [
test_ec,
'--force',
'--set-default-module',
'--robot',
self.test_prefix,
]
self.eb_main(args, do_build=True, raise_error=True)
# default module is set for specified easyconfig, but *not* for its dependency
modfiles_dir = os.path.join(self.test_installpath, 'modules', 'all')
self.assertEqual(sorted(os.listdir(modfiles_dir)), ['test', 'thisisjustatestdep'])
test_mod_dir = os.path.join(modfiles_dir, 'test')
testdep_mod_dir = os.path.join(modfiles_dir, 'thisisjustatestdep')
if get_module_syntax() == 'Lua':
# only 'default' symlink for test/1.0, not for thisisjustadep/3.14
self.assertEqual(sorted(os.listdir(test_mod_dir)), ['1.0.lua', 'default'])
self.assertEqual(sorted(os.listdir(testdep_mod_dir)), ['3.14.lua'])
default_symlink = os.path.join(test_mod_dir, 'default')
self.assertTrue(os.path.islink(default_symlink))
self.assertEqual(os.readlink(default_symlink), '1.0.lua')
elif get_module_syntax() == 'Tcl':
self.assertEqual(sorted(os.listdir(test_mod_dir)), ['.version', '1.0'])
self.assertEqual(sorted(os.listdir(testdep_mod_dir)), ['3.14'])
dot_version_file = os.path.join(test_mod_dir, '.version')
self.assertTrue("set ModulesVersion 1.0" in read_file(dot_version_file))
else:
self.assertTrue(False, "Uknown module syntax: %s" % get_module_syntax())
def test_inject_checksums(self):
"""Test for --inject-checksums"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-gompi-2018a-test.eb')
# checksums are injected in existing easyconfig, so test with a copy
test_ec = os.path.join(self.test_prefix, 'test.eb')
copy_file(toy_ec, test_ec)
# if existing checksums are found, --force is required
args = [test_ec, '--inject-checksums']
self.mock_stdout(True)
self.mock_stderr(True)
self.assertErrorRegex(EasyBuildError, "Found existing checksums", self.eb_main, args, raise_error=True)
stdout = self.get_stdout().strip()
stderr = self.get_stderr().strip()
self.mock_stdout(False)
self.mock_stderr(False)
# make sure software install directory is *not* created (see bug issue #3064)
self.assertFalse(os.path.exists(os.path.join(self.test_installpath, 'software', 'toy')))
# SHA256 is default type of checksums used
self.assertTrue("injecting sha256 checksums in" in stdout)
self.assertEqual(stderr, '')
args.append('--force')
stdout, stderr = self._run_mock_eb(args, raise_error=True, strip=True)
toy_source_sha256 = '44332000aa33b99ad1e00cbd1a7da769220d74647060a10e807b916d73ea27bc'
toy_patch_sha256 = '81a3accc894592152f81814fbf133d39afad52885ab52c25018722c7bda92487'
bar_tar_gz_sha256 = 'f3676716b610545a4e8035087f5be0a0248adee0abb3930d3edb76d498ae91e7'
bar_patch = 'bar-0.0_fix-silly-typo-in-printf-statement.patch'
bar_patch_sha256 = '84db53592e882b5af077976257f9c7537ed971cb2059003fd4faa05d02cae0ab'
bar_patch_bis = 'bar-0.0_fix-very-silly-typo-in-printf-statement.patch'
bar_patch_bis_sha256 = 'd0bf102f9c5878445178c5f49b7cd7546e704c33fe2060c7354b7e473cfeb52b'
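# known checksums for the toy/bar test sources and patches shipped with the test suite,
# used below to verify exactly what --inject-checksums writes into the easyconfig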
patterns = [
r"^== injecting sha256 checksums in .*/test\.eb$",
r"^== fetching sources & patches for test\.eb\.\.\.$",
r"^== backup of easyconfig file saved to .*/test\.eb\.bak_[0-9]+_[0-9]+\.\.\.$",
r"^== injecting sha256 checksums for sources & patches in test\.eb\.\.\.$",
r"^== \* toy-0.0\.tar\.gz: %s$" % toy_source_sha256,
r"^== \* toy-0\.0_fix-silly-typo-in-printf-statement\.patch: %s$" % toy_patch_sha256,
r"^== injecting sha256 checksums for extensions in test\.eb\.\.\.$",
r"^== \* bar-0\.0\.tar\.gz: %s$" % bar_tar_gz_sha256,
r"^== \* %s: %s$" % (bar_patch, bar_patch_sha256),
r"^== \* %s: %s$" % (bar_patch_bis, bar_patch_bis_sha256),
r"^== \* barbar-0\.0\.tar\.gz: d5bd9908cdefbe2d29c6f8d5b45b2aaed9fd904b5e6397418bb5094fbdb3d838$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
warning_msg = "WARNING: Found existing checksums in test.eb, overwriting them (due to use of --force)..."
self.assertEqual(stderr, warning_msg)
ec_txt = read_file(test_ec)
# some checks on 'raw' easyconfig contents
# single-line checksum for barbar extension since there's only one
self.assertTrue("'checksums': ['d5bd9908cdefbe2d29c6f8d5b45b2aaed9fd904b5e6397418bb5094fbdb3d838']," in ec_txt)
# single-line checksum entry for bar source tarball
regex = re.compile("^[ ]*'%s', # bar-0.0.tar.gz$" % bar_tar_gz_sha256, re.M)
self.assertTrue(regex.search(ec_txt), "Pattern '%s' found in: %s" % (regex.pattern, ec_txt))
# no single-line checksum entry for bar patches, since line would be > 120 chars
bar_patch_patterns = [
r"^[ ]*# %s\n[ ]*'%s',$" % (bar_patch, bar_patch_sha256),
r"^[ ]*# %s\n[ ]*'%s',$" % (bar_patch_bis, bar_patch_bis_sha256),
]
for pattern in bar_patch_patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(ec_txt), "Pattern '%s' found in: %s" % (regex.pattern, ec_txt))
# bar patch filenames are each listed on a line of their own (a single 'patches' line would be too long)
bar_patch_patterns = [
r"^[ ]*'%s',$" % bar_patch,
r"^[ ]*'%s',$" % bar_patch_bis,
]
for pattern in bar_patch_patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(ec_txt), "Pattern '%s' found in: %s" % (regex.pattern, ec_txt))
# name/version of toy should NOT be hardcoded in exts_list, 'name'/'version' parameters should be used
self.assertTrue(' (name, version, {' in ec_txt)
# make sure checksums are only there once...
# exactly one definition of 'checksums' easyconfig parameter
self.assertEqual(re.findall('^checksums', ec_txt, re.M), ['checksums'])
# exactly three checksum specs for extensions, one list of checksums for each extension
self.assertEqual(re.findall("[ ]*'checksums'", ec_txt, re.M), [" 'checksums'"] * 3)
# there should be only one hit for 'source_urls', i.e. the one in exts_default_options
self.assertEqual(len(re.findall('source_urls*.*$', ec_txt, re.M)), 1)
# no parse errors for updated easyconfig file...
ec = EasyConfigParser(test_ec).get_config_dict()
self.assertEqual(ec['sources'], ['%(name)s-%(version)s.tar.gz'])
self.assertEqual(ec['patches'], ['toy-0.0_fix-silly-typo-in-printf-statement.patch'])
self.assertEqual(ec['checksums'], [toy_source_sha256, toy_patch_sha256])
self.assertEqual(ec['exts_default_options'], {'source_urls': ['http://example.com/%(name)s']})
self.assertEqual(ec['exts_list'][0], 'ls')
self.assertEqual(ec['exts_list'][1], ('bar', '0.0', {
'buildopts': " && gcc bar.c -o anotherbar",
'checksums': [
bar_tar_gz_sha256,
bar_patch_sha256,
bar_patch_bis_sha256,
],
'exts_filter': ("cat | grep '^bar$'", '%(name)s'),
'patches': [bar_patch, bar_patch_bis],
'toy_ext_param': "mv anotherbar bar_bis",
'unknowneasyconfigparameterthatshouldbeignored': 'foo',
'keepsymlinks': True,
}))
self.assertEqual(ec['exts_list'][2], ('barbar', '0.0', {
'checksums': ['d5bd9908cdefbe2d29c6f8d5b45b2aaed9fd904b5e6397418bb5094fbdb3d838'],
'start_dir': 'src',
}))
# backup of easyconfig was created
ec_backups = glob.glob(test_ec + '.bak_*')
self.assertEqual(len(ec_backups), 1)
self.assertEqual(read_file(toy_ec), read_file(ec_backups[0]))
self.assertTrue("injecting sha256 checksums in" in stdout)
self.assertEqual(stderr, warning_msg)
remove_file(ec_backups[0])
# if any checksums are present already, it doesn't matter if they're wrong (since they will be replaced)
ectxt = read_file(test_ec)
for chksum in ec['checksums'] + [c for e in ec['exts_list'][1:] for c in e[2]['checksums']]:
ectxt = ectxt.replace(chksum, chksum[::-1])
write_file(test_ec, ectxt)
stdout, stderr = self._run_mock_eb(args, raise_error=True, strip=True)
ec = EasyConfigParser(test_ec).get_config_dict()
self.assertEqual(ec['checksums'], [toy_source_sha256, toy_patch_sha256])
ec_backups = glob.glob(test_ec + '.bak_*')
self.assertEqual(len(ec_backups), 1)
remove_file(ec_backups[0])
# also test injecting of MD5 checksums into easyconfig that doesn't include checksums already
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
toy_ec_txt = read_file(toy_ec)
# get rid of existing checksums
regex = re.compile(r'^checksums(?:.|\n)*?\]\s*$', re.M)
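# illustrative example (checksum value reused from elsewhere in this test) of the kind of
# multi-line block the regex above strips, i.e. everything from a line starting with
# 'checksums' up to the first line that ends with ']':
#   checksums = [
#       '44332000aa33b99ad1e00cbd1a7da769220d74647060a10e807b916d73ea27bc',
#   ]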
toy_ec_txt = regex.sub('', toy_ec_txt)
self.assertFalse('checksums = ' in toy_ec_txt)
write_file(test_ec, toy_ec_txt)
args = [test_ec, '--inject-checksums=md5']
stdout, stderr = self._run_mock_eb(args, raise_error=True, strip=True)
patterns = [
r"^== injecting md5 checksums in .*/test\.eb$",
r"^== fetching sources & patches for test\.eb\.\.\.$",
r"^== backup of easyconfig file saved to .*/test\.eb\.bak_[0-9]+_[0-9]+\.\.\.$",
r"^== injecting md5 checksums for sources & patches in test\.eb\.\.\.$",
r"^== \* toy-0.0\.tar\.gz: be662daa971a640e40be5c804d9d7d10$",
r"^== \* toy-0\.0_fix-silly-typo-in-printf-statement\.patch: a99f2a72cee1689a2f7e3ace0356efb1$",
r"^== \* toy-extra\.txt: 3b0787b3bf36603ae1398c4a49097893$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
self.assertEqual(stderr, '')
# backup of easyconfig was created
ec_backups = glob.glob(test_ec + '.bak_*')
self.assertEqual(len(ec_backups), 1)
self.assertEqual(toy_ec_txt, read_file(ec_backups[0]))
# no parse errors for updated easyconfig file...
ec = EasyConfigParser(test_ec).get_config_dict()
checksums = [
'be662daa971a640e40be5c804d9d7d10',
'a99f2a72cee1689a2f7e3ace0356efb1',
'3b0787b3bf36603ae1398c4a49097893',
]
self.assertEqual(ec['checksums'], checksums)
# check whether empty list of checksums is stripped out by --inject-checksums
toy_ec_txt = read_file(toy_ec)
regex = re.compile(r'^checksums(?:.|\n)*?\]\s*$', re.M)
toy_ec_txt = regex.sub('', toy_ec_txt)
toy_ec_txt += "\nchecksums = []"
write_file(test_ec, toy_ec_txt)
args = [test_ec, '--inject-checksums', '--force']
self._run_mock_eb(args, raise_error=True, strip=True)
ec_txt = read_file(test_ec)
regex = re.compile(r"^checksums = \[\]", re.M)
self.assertFalse(regex.search(ec_txt), "Pattern '%s' should not be found in: %s" % (regex.pattern, ec_txt))
ec = EasyConfigParser(test_ec).get_config_dict()
expected_checksums = [
'44332000aa33b99ad1e00cbd1a7da769220d74647060a10e807b916d73ea27bc',
'81a3accc894592152f81814fbf133d39afad52885ab52c25018722c7bda92487',
'4196b56771140d8e2468fb77f0240bc48ddbf5dabafe0713d612df7fafb1e458'
]
self.assertEqual(ec['checksums'], expected_checksums)
# passing easyconfig filename as argument to --inject-checksums results in error being reported,
# because it's not a valid type of checksum
args = ['--inject-checksums', test_ec]
self.mock_stdout(True)
self.mock_stderr(True)
self.assertErrorRegex(SystemExit, '.*', self.eb_main, args, raise_error=True, raise_systemexit=True)
stdout = self.get_stdout().strip()
stderr = self.get_stderr().strip()
self.mock_stdout(False)
self.mock_stderr(False)
self.assertEqual(stdout, '')
self.assertTrue("option --inject-checksums: invalid choice" in stderr)
def test_force_download(self):
"""Test --force-download"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
toy_srcdir = os.path.join(topdir, 'sandbox', 'sources', 'toy')
copy_file(toy_ec, self.test_prefix)
toy_tar = 'toy-0.0.tar.gz'
copy_file(os.path.join(toy_srcdir, toy_tar), os.path.join(self.test_prefix, 't', 'toy', toy_tar))
toy_ec = os.path.join(self.test_prefix, os.path.basename(toy_ec))
write_file(toy_ec, "\nsource_urls = ['file://%s']" % toy_srcdir, append=True)
args = [
toy_ec,
'--force',
'--force-download',
'--sourcepath=%s' % self.test_prefix,
]
stdout, stderr = self._run_mock_eb(args, do_build=True, raise_error=True, verbose=True, strip=True)
self.assertEqual(stdout, '')
regex = re.compile(r"^WARNING: Found file toy-0.0.tar.gz at .*, but re-downloading it anyway\.\.\.$")
self.assertTrue(regex.match(stderr), "Pattern '%s' matches: %s" % (regex.pattern, stderr))
# check that existing source tarball was backed up
toy_tar_backups = glob.glob(os.path.join(self.test_prefix, 't', 'toy', '*.bak_*'))
self.assertEqual(len(toy_tar_backups), 1)
self.assertTrue(os.path.basename(toy_tar_backups[0]).startswith('toy-0.0.tar.gz.bak_'))
def test_enforce_checksums(self):
"""Test effect of --enforce-checksums"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0-gompi-2018a-test.eb')
test_ec = os.path.join(self.test_prefix, 'test.eb')
args = [
test_ec,
'--stop=source',
'--enforce-checksums',
]
# checksum is missing for patch of 'bar' extension, so --enforce-checksums should result in an error
copy_file(toy_ec, test_ec)
error_pattern = r"Missing checksum for bar-0.0[^ ]*\.patch"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
# get rid of checksums for extensions, should result in different error message
# because of missing checksum for source of 'bar' extension
regex = re.compile("^.*'checksums':.*$", re.M)
test_ec_txt = regex.sub('', read_file(test_ec))
self.assertFalse("'checksums':" in test_ec_txt)
write_file(test_ec, test_ec_txt)
error_pattern = r"Missing checksum for bar-0\.0\.tar\.gz"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
# wipe both exts_list and checksums, so we can check whether missing checksum for main source is caught
test_ec_txt = read_file(test_ec)
for param in ['checksums', 'exts_list']:
regex = re.compile(r'^%s(?:.|\n)*?\]\s*$' % param, re.M)
test_ec_txt = regex.sub('', test_ec_txt)
self.assertFalse('%s = ' % param in test_ec_txt)
write_file(test_ec, test_ec_txt)
error_pattern = "Missing checksum for toy-0.0.tar.gz"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
def test_show_system_info(self):
"""Test for --show-system-info."""
txt, _ = self._run_mock_eb(['--show-system-info'], raise_error=True)
patterns = [
r"^System information \(.*\):$",
r"^\* OS:$",
r"^ -> name: ",
r"^ -> type: ",
r"^ -> version: ",
r"^ -> platform name: ",
r"^\* CPU:$",
r"^ -> vendor: ",
r"^ -> architecture: ",
r"^ -> family: ",
r"^ -> model: ",
r"^ -> speed: [0-9.]+",
r"^ -> cores: [0-9]+",
r"^ -> features: ",
r"^\* software:$",
r"^ -> glibc version: ",
r"^ -> Python binary: .*/[pP]ython[0-9]?",
r"^ -> Python version: [0-9.]+",
]
if HAVE_ARCHSPEC:
patterns.append(r"^ -> arch name: \w+$")
else:
patterns.append(r"^ -> arch name: UNKNOWN \(archspec is not installed\?\)$")
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_check_eb_deps(self):
"""Test for --check-eb-deps."""
txt, _ = self._run_mock_eb(['--check-eb-deps'], raise_error=True)
# keep in mind that these patterns should match with both normal output and Rich output!
opt_dep_info_pattern = r'([0-9.]+|\(NOT FOUND\)|not found|\(unknown version\))'
tool_info_pattern = r'([0-9.]+|\(NOT FOUND\)|not found|\(found, UNKNOWN version\)|version\?\!)'
patterns = [
r"Required dependencies",
r"Python.* [23][0-9.]+",
r"modules tool.* [A-Za-z0-9.\s-]+",
r"Optional dependencies",
r"archspec.* %s.*determining name" % opt_dep_info_pattern,
r"GitPython.* %s.*GitHub integration" % opt_dep_info_pattern,
r"Rich.* %s.*eb command rich terminal output" % opt_dep_info_pattern,
r"setuptools.* %s.*information on Python packages" % opt_dep_info_pattern,
r"System tools",
r"make.* %s" % tool_info_pattern,
r"patch.* %s" % tool_info_pattern,
r"sed.* %s" % tool_info_pattern,
r"Slurm.* %s" % tool_info_pattern,
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_tmp_logdir(self):
"""Test use of --tmp-logdir."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# purposely use a non-existing directory as log directory
tmp_logdir = os.path.join(self.test_prefix, 'tmp-logs')
self.assertFalse(os.path.exists(tmp_logdir))
# force passing logfile=None to main in eb_main
self.logfile = None
# check log message with --skip for existing module
args = [
toy_ec,
'--sourcepath=%s' % self.test_sourcepath,
'--buildpath=%s' % self.test_buildpath,
'--installpath=%s' % self.test_installpath,
'--force',
'--debug',
'--tmp-logdir=%s' % tmp_logdir,
]
self.eb_main(args, do_build=True, raise_error=True)
tmp_logs = os.listdir(tmp_logdir)
self.assertEqual(len(tmp_logs), 1)
logtxt = read_file(os.path.join(tmp_logdir, tmp_logs[0]))
self.assertTrue("COMPLETED: Installation ended successfully" in logtxt)
def test_sanity_check_only(self):
"""Test use of --sanity-check-only."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
test_ec = os.path.join(self.test_prefix, 'test.ec')
test_ec_txt = read_file(toy_ec)
test_ec_txt += '\n' + '\n'.join([
"sanity_check_commands = ['barbar', 'toy']",
"sanity_check_paths = {'files': ['bin/barbar', 'bin/toy'], 'dirs': ['bin']}",
"exts_list = [",
" ('barbar', '0.0', {",
" 'start_dir': 'src',",
" 'exts_filter': ('ls -l lib/lib%(ext_name)s.a', ''),",
" })",
"]",
])
write_file(test_ec, test_ec_txt)
# sanity check fails if software was not installed yet
outtxt, error_thrown = self.eb_main([test_ec, '--sanity-check-only'], do_build=True, return_error=True)
self.assertTrue("Sanity check failed" in str(error_thrown))
# actually install, then try --sanity-check-only again;
# need to use --force to install toy because module already exists (but installation doesn't)
self.eb_main([test_ec, '--force'], do_build=True, raise_error=True)
args = [test_ec, '--sanity-check-only']
stdout = self.mocked_main(args + ['--trace'], do_build=True, raise_error=True, testing=False)
skipped = [
"fetching files",
"creating build dir, resetting environment",
"unpacking",
"patching",
"preparing",
"configuring",
"building",
"testing",
"installing",
"taking care of extensions",
"restore after iterating",
"postprocessing",
"cleaning up",
"creating module",
"permissions",
"packaging"
]
for skip in skipped:
self.assertTrue("== %s [skipped]" % skip)
self.assertTrue("== sanity checking..." in stdout)
self.assertTrue("COMPLETED: Installation ended successfully" in stdout)
msgs = [
" >> file 'bin/barbar' found: OK",
" >> file 'bin/toy' found: OK",
" >> (non-empty) directory 'bin' found: OK",
" >> loading modules: toy/0.0...",
" >> result for command 'toy': OK",
"ls -l lib/libbarbar.a", # sanity check for extension barbar (via exts_filter)
]
for msg in msgs:
self.assertTrue(msg in stdout, "'%s' found in: %s" % (msg, stdout))
ebroottoy = os.path.join(self.test_installpath, 'software', 'toy', '0.0')
# check if sanity check for extension fails if a file provided by that extension,
# which is checked by the sanity check for that extension, is no longer there
libbarbar = os.path.join(ebroottoy, 'lib', 'libbarbar.a')
move_file(libbarbar, libbarbar + '.moved')
outtxt, error_thrown = self.eb_main(args + ['--debug'], do_build=True, return_error=True)
error_msg = str(error_thrown)
error_patterns = [
r"Sanity check failed",
r'command "ls -l lib/libbarbar\.a" failed',
]
for error_pattern in error_patterns:
regex = re.compile(error_pattern)
self.assertTrue(regex.search(error_msg), "Pattern '%s' should be found in: %s" % (regex.pattern, error_msg))
# failing sanity check for extension can be bypassed via --skip-extensions
outtxt = self.eb_main(args + ['--skip-extensions'], do_build=True, raise_error=True)
self.assertTrue("Sanity check for toy successful" in outtxt)
# restore fail, we want a passing sanity check for the next check
move_file(libbarbar + '.moved', libbarbar)
# check use of --sanity-check-only when installation directory is read-only;
# cfr. https://github.com/easybuilders/easybuild-framework/issues/3757
adjust_permissions(ebroottoy, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH, add=False, recursive=True)
stdout = self.mocked_main(args + ['--trace'], do_build=True, raise_error=True, testing=False)
def test_skip_extensions(self):
"""Test use of --skip-extensions."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
# add extension, which should be skipped
test_ec = os.path.join(self.test_prefix, 'test.ec')
test_ec_txt = read_file(toy_ec)
test_ec_txt += '\n' + '\n'.join([
"exts_list = [",
" ('barbar', '0.0', {",
" 'start_dir': 'src',",
" 'exts_filter': ('ls -l lib/lib%(ext_name)s.a', ''),",
" })",
"]",
])
write_file(test_ec, test_ec_txt)
args = [test_ec, '--force', '--skip-extensions']
self.eb_main(args, do_build=True, return_error=True)
toy_mod = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0')
if get_module_syntax() == 'Lua':
toy_mod += '.lua'
self.assertTrue(os.path.exists(toy_mod), "%s should exist" % toy_mod)
toy_installdir = os.path.join(self.test_installpath, 'software', 'toy', '0.0')
for path in (os.path.join('bin', 'barbar'), os.path.join('lib', 'libbarbar.a')):
path = os.path.join(toy_installdir, path)
self.assertFalse(os.path.exists(path), "Path %s should not exist" % path)
def test_fake_vsc_include(self):
"""Test whether fake 'vsc' namespace is triggered for modules included via --include-*."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
test_mns = os.path.join(self.test_prefix, 'test_mns.py')
test_mns_txt = '\n'.join([
"import vsc",
"from easybuild.tools.module_naming_scheme.easybuild_mns import EasyBuildMNS",
"class TestMNS(EasyBuildMNS):",
" pass",
])
write_file(test_mns, test_mns_txt)
args = [
toy_ec,
'--dry-run',
'--include-module-naming-schemes=%s' % test_mns,
]
self.mock_stderr(True)
self.assertErrorRegex(SystemExit, '1', self.eb_main, args, do_build=True, raise_error=True, verbose=True)
stderr = self.get_stderr()
self.mock_stderr(False)
regex = re.compile("ERROR: Detected import from 'vsc' namespace in .*/test_mns.py")
self.assertTrue(regex.search(stderr), "Pattern '%s' found in: %s" % (regex.pattern, stderr))
def test_installdir(self):
"""Check naming scheme of installation directory."""
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
eb = EasyBlock(EasyConfig(toy_ec))
self.assertTrue(eb.installdir.endswith('/software/toy/0.0'))
# even with HierarchicalMNS the installation directory remains the same,
# due to --fixed-installdir-naming-scheme being enabled by default
args = ['--module-naming-scheme=HierarchicalMNS']
init_config(args=args)
eb = EasyBlock(EasyConfig(toy_ec))
self.assertTrue(eb.installdir.endswith('/software/toy/0.0'))
# things change when --disable-fixed-installdir-naming-scheme is used
init_config(args=args, build_options={'fixed_installdir_naming_scheme': False})
eb = EasyBlock(EasyConfig(toy_ec))
self.assertTrue(eb.installdir.endswith('/software/Core/toy/0.0'))
def test_sort_looseversions(self):
"""Test sort_looseversions funuction."""
ver1 = LooseVersion('1.2.3')
ver2 = LooseVersion('4.5.6')
ver3 = LooseVersion('1.2.3dev')
ver4 = LooseVersion('system')
ver5 = LooseVersion('rc3')
ver6 = LooseVersion('v1802')
# some versions are included multiple times on purpose,
# to also test comparison between equal LooseVersion instances
input = [ver3, ver5, ver1, ver2, ver4, ver6, ver3, ver4, ver1]
expected = [ver1, ver1, ver3, ver3, ver2, ver5, ver4, ver4, ver6]
self.assertEqual(sort_looseversions(input), expected)
# also test on list of tuples consisting of a LooseVersion instance + a string
# (as in the list_software_* functions)
suff1 = ''
suff2 = '-foo'
suff3 = '-bar'
input = [(ver3, suff1), (ver5, suff3), (ver1, suff2), (ver2, suff3), (ver4, suff1),
(ver6, suff2), (ver3, suff3), (ver4, suff3), (ver1, suff1)]
expected = [(ver1, suff1), (ver1, suff2), (ver3, suff1), (ver3, suff3), (ver2, suff3),
(ver5, suff3), (ver4, suff1), (ver4, suff3), (ver6, suff2)]
self.assertEqual(sort_looseversions(input), expected)
def test_cuda_compute_capabilities(self):
"""Test --cuda-compute-capabilities configuration option."""
args = ['--cuda-compute-capabilities=3.5,6.2,7.0', '--show-config']
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
regex = re.compile(r"^cuda-compute-capabilities\s*\(C\)\s*=\s*3\.5, 6\.2, 7\.0$", re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' not found in: %s" % (regex.pattern, txt))
def test_create_index(self):
"""Test --create-index option."""
test_ecs = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'easyconfigs', 'test_ecs')
remove_dir(self.test_prefix)
copy_dir(test_ecs, self.test_prefix)
args = ['--create-index', self.test_prefix]
stdout, stderr = self._run_mock_eb(args, raise_error=True)
self.assertEqual(stderr, '')
patterns = [
r"^Creating index for %s\.\.\.$",
r"^Index created at %s/\.eb-path-index \([0-9]+ files\)$",
]
for pattern in patterns:
regex = re.compile(pattern % self.test_prefix, re.M)
self.assertTrue(regex.search(stdout), "Pattern %s matches in: %s" % (regex.pattern, stdout))
# check contents of index
index_fp = os.path.join(self.test_prefix, '.eb-path-index')
index_txt = read_file(index_fp)
datestamp_pattern = r"[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+"
patterns = [
r"^# created at: " + datestamp_pattern + '$',
r"^# valid until: " + datestamp_pattern + '$',
r"^g/GCC/GCC-7.3.0-2.30.eb",
r"^t/toy/toy-0\.0\.eb",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(index_txt), "Pattern '%s' found in: %s" % (regex.pattern, index_txt))
# existing index is not overwritten without --force
error_pattern = "File exists, not overwriting it without --force: .*/.eb-path-index"
self.assertErrorRegex(EasyBuildError, error_pattern, self._run_mock_eb, args, raise_error=True)
# also test creating index that's infinitely valid
args.extend(['--index-max-age=0', '--force'])
self._run_mock_eb(args, raise_error=True)
index_txt = read_file(index_fp)
regex = re.compile(r"^# valid until: 9999-12-31 23:59:59", re.M)
self.assertTrue(regex.search(index_txt), "Pattern '%s' found in: %s" % (regex.pattern, index_txt))
def test_sysroot(self):
"""Test use of --sysroot option."""
self.assertTrue(os.path.exists(self.test_prefix))
sysroot_arg = '--sysroot=' + self.test_prefix
stdout, stderr = self._run_mock_eb([sysroot_arg, '--show-config'], raise_error=True)
self.assertEqual(stderr, '')
sysroot_regex = re.compile(r'^sysroot\s*\(C\) = %s$' % self.test_prefix, re.M)
self.assertTrue(sysroot_regex.search(stdout), "Pattern '%s' not found in: %s" % (sysroot_regex, stdout))
os.environ['EASYBUILD_SYSROOT'] = self.test_prefix
stdout, stderr = self._run_mock_eb(['--show-config'], raise_error=True)
self.assertEqual(stderr, '')
sysroot_regex = re.compile(r'^sysroot\s*\(E\) = %s$' % self.test_prefix, re.M)
self.assertTrue(sysroot_regex.search(stdout), "Pattern '%s' not found in: %s" % (sysroot_regex, stdout))
# specifying a non-existing path results in an error
doesnotexist = os.path.join(self.test_prefix, 'non-existing-subdirectory')
sysroot_arg = '--sysroot=' + doesnotexist
args = [sysroot_arg, '--show-config']
error_pattern = r"Specified sysroot '%s' does not exist!" % doesnotexist
self.assertErrorRegex(EasyBuildError, error_pattern, self._run_mock_eb, args, raise_error=True)
os.environ['EASYBUILD_SYSROOT'] = doesnotexist
self.assertErrorRegex(EasyBuildError, error_pattern, self._run_mock_eb, ['--show-config'], raise_error=True)
def test_accept_eula_for(self):
"""Test --accept-eula-for configuration option."""
# use toy-0.0.eb easyconfig file that comes with the tests
topdir = os.path.abspath(os.path.dirname(__file__))
toy_ec = os.path.join(topdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
test_ec = os.path.join(self.test_prefix, 'test.eb')
test_ec_txt = '\n'.join([
"easyblock = 'EB_toy_eula'",
'',
read_file(toy_ec),
])
write_file(test_ec, test_ec_txt)
# by default, no EULAs are accepted at all
args = [test_ec, '--force']
error_pattern = r"The End User License Argreement \(EULA\) for toy is currently not accepted!"
self.assertErrorRegex(EasyBuildError, error_pattern, self.eb_main, args, do_build=True, raise_error=True)
toy_modfile = os.path.join(self.test_installpath, 'modules', 'all', 'toy', '0.0')
if get_module_syntax() == 'Lua':
toy_modfile += '.lua'
# installation proceeds if EasyBuild is configured to accept EULA for specified software via --accept-eula-for
for val in ('foo,toy,bar', '.*', 't.y'):
self.eb_main(args + ['--accept-eula-for=' + val], do_build=True, raise_error=True)
self.assertTrue(os.path.exists(toy_modfile))
remove_dir(self.test_installpath)
self.assertFalse(os.path.exists(toy_modfile))
# also check use of $EASYBUILD_ACCEPT_EULA to accept EULA for specified software
os.environ['EASYBUILD_ACCEPT_EULA_FOR'] = val
self.eb_main(args, do_build=True, raise_error=True)
self.assertTrue(os.path.exists(toy_modfile))
remove_dir(self.test_installpath)
self.assertFalse(os.path.exists(toy_modfile))
del os.environ['EASYBUILD_ACCEPT_EULA_FOR']
# also check deprecated --accept-eula configuration option
self.allow_deprecated_behaviour()
self.mock_stderr(True)
self.eb_main(args + ['--accept-eula=foo,toy,bar'], do_build=True, raise_error=True)
stderr = self.get_stderr()
self.mock_stderr(False)
self.assertTrue("Use accept-eula-for configuration setting rather than accept-eula" in stderr)
remove_dir(self.test_installpath)
self.assertFalse(os.path.exists(toy_modfile))
# also via $EASYBUILD_ACCEPT_EULA
self.mock_stderr(True)
os.environ['EASYBUILD_ACCEPT_EULA'] = 'toy'
self.eb_main(args, do_build=True, raise_error=True)
stderr = self.get_stderr()
self.mock_stderr(False)
self.assertTrue(os.path.exists(toy_modfile))
self.assertTrue("Use accept-eula-for configuration setting rather than accept-eula" in stderr)
remove_dir(self.test_installpath)
self.assertFalse(os.path.exists(toy_modfile))
# also check accepting EULA via 'accept_eula = True' in easyconfig file
self.disallow_deprecated_behaviour()
del os.environ['EASYBUILD_ACCEPT_EULA']
write_file(test_ec, test_ec_txt + '\naccept_eula = True')
self.eb_main(args, do_build=True, raise_error=True)
self.assertTrue(os.path.exists(toy_modfile))
def test_config_abs_path(self):
"""Test ensuring of absolute path values for path configuration options."""
test_topdir = os.path.join(self.test_prefix, 'test_topdir')
test_subdir = os.path.join(test_topdir, 'test_middle_dir', 'test_subdir')
mkdir(test_subdir, parents=True)
change_dir(test_subdir)
# a relative path specified in a configuration file is positively weird, but fine :)
cfgfile = os.path.join(self.test_prefix, 'test.cfg')
cfgtxt = '\n'.join([
"[config]",
"containerpath = ..",
"repositorypath = /apps/easyconfigs_archive, somesubdir",
])
write_file(cfgfile, cfgtxt)
# relative paths in environment variables are also weird,
# but OK for the sake of testing...
os.environ['EASYBUILD_INSTALLPATH'] = '../..'
os.environ['EASYBUILD_ROBOT_PATHS'] = '../..'
args = [
'--configfiles=%s' % cfgfile,
'--prefix=..',
'--sourcepath=.',
'--show-config',
]
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
patterns = [
r"^containerpath\s+\(F\) = /.*/test_topdir/test_middle_dir$",
r"^installpath\s+\(E\) = /.*/test_topdir$",
r"^prefix\s+\(C\) = /.*/test_topdir/test_middle_dir$",
r"^repositorypath\s+\(F\) = \('/apps/easyconfigs_archive', ' somesubdir'\)$",
r"^sourcepath\s+\(C\) = /.*/test_topdir/test_middle_dir/test_subdir$",
r"^robot-paths\s+\(E\) = /.*/test_topdir$",
]
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' should be found in: %s" % (pattern, txt))
# paths specified via --robot have precedence over those specified via $EASYBUILD_ROBOT_PATHS
change_dir(test_subdir)
args.append('--robot=..:.')
txt, _ = self._run_mock_eb(args, do_build=True, raise_error=True, testing=False, strip=True)
patterns.pop(-1)
robot_value_pattern = ', '.join([
r'/.*/test_topdir/test_middle_dir', # via --robot (first path)
r'/.*/test_topdir/test_middle_dir/test_subdir', # via --robot (second path)
r'/.*/test_topdir', # via $EASYBUILD_ROBOT_PATHS
])
patterns.extend([
r"^robot-paths\s+\(C\) = %s$" % robot_value_pattern,
r"^robot\s+\(C\) = %s$" % robot_value_pattern,
])
for pattern in patterns:
regex = re.compile(pattern, re.M)
self.assertTrue(regex.search(txt), "Pattern '%s' should be found in: %s" % (pattern, txt))
# end-to-end testing of unknown filename
def test_easystack_wrong_read(self):
"""Test for --easystack <easystack.yaml> when wrong name is provided"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_easystack = os.path.join(topdir, 'easystacks', 'test_easystack_nonexistent.yaml')
args = ['--easystack', toy_easystack, '--experimental']
expected_err = "No such file or directory: '%s'" % toy_easystack
self.assertErrorRegex(EasyBuildError, expected_err, self.eb_main, args, raise_error=True)
# testing basics - end-to-end
# expecting successful build
def test_easystack_basic(self):
"""Test for --easystack <easystack.yaml> -> success case"""
topdir = os.path.dirname(os.path.abspath(__file__))
toy_easystack = os.path.join(topdir, 'easystacks', 'test_easystack_basic.yaml')
args = ['--easystack', toy_easystack, '--debug', '--experimental', '--dry-run']
stdout = self.eb_main(args, do_build=True, raise_error=True)
patterns = [
r"[\S\s]*INFO Building from easystack:[\S\s]*",
r"[\S\s]*DEBUG EasyStack parsed\. Proceeding to install these Easyconfigs: "
r"binutils-2.25-GCCcore-4.9.3.eb, binutils-2.26-GCCcore-4.9.3.eb, "
r"foss-2018a.eb, toy-0.0-gompi-2018a-test.eb",
r"\* \[ \] .*/test_ecs/b/binutils/binutils-2.25-GCCcore-4.9.3.eb \(module: binutils/2.25-GCCcore-4.9.3\)",
r"\* \[ \] .*/test_ecs/b/binutils/binutils-2.26-GCCcore-4.9.3.eb \(module: binutils/2.26-GCCcore-4.9.3\)",
r"\* \[ \] .*/test_ecs/t/toy/toy-0.0-gompi-2018a-test.eb \(module: toy/0.0-gompi-2018a-test\)",
r"\* \[x\] .*/test_ecs/f/foss/foss-2018a.eb \(module: foss/2018a\)",
]
for pattern in patterns:
regex = re.compile(pattern)
self.assertTrue(regex.search(stdout), "Pattern '%s' should be found in: %s" % (regex.pattern, stdout))
def suite():
""" returns all the testcases in this module """
return TestLoaderFiltered().loadTestsFromTestCase(CommandLineOptionsTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
|
akesandgren/easybuild-framework
|
test/framework/options.py
|
Python
|
gpl-2.0
| 296,951
|
[
"NetCDF",
"SIESTA"
] |
660d2fed7e5bd07621ebd93d11678e8c35b49a366fabe77a01ccdf8cb74d9b65
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
@when('config.changed.channel')
def channel_changed():
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under their file
# paths as keys.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
if not get_keys_from_leader(keys):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k):
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
hookenv.status_set('active', 'Kubernetes master running.')
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
# etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is upgrading
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-worker:kube-control'.format(
hookenv.service_name()))
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api.connected')
def push_api_data(kube_api):
''' Send configuration to remote consumer.'''
# Since all relations already have the private ip address, only
# send the port on the relation object to all consumers.
# The kubernetes api-server uses 6443 for the default secure port.
kube_api.set_api_port('6443')
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
Test if the addons got installed
Returns: True if the addons got applied
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
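# For illustration: on a 64-bit Intel/AMD Ubuntu system 'dpkg --print-architecture'
# typically prints 'amd64', so arch() returns the string 'amd64'.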
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu user so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
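# Illustrative usage sketch (all paths and values below are hypothetical, not taken
# from the charm config): this is how build_kubeconfig() above drives this helper
# when only a basic-auth password is available:
#   create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', user='admin', password='s3cret')
# which boils down to kubectl invocations equivalent to (omitting the 'unset users'
# cleanup step):
#   kubectl config --kubeconfig=/home/ubuntu/config set-cluster juju-cluster \
#       --server=https://10.0.0.1:6443 --certificate-authority=/root/cdk/ca.crt --embed-certs=true
#   kubectl config --kubeconfig=/home/ubuntu/config set-credentials admin \
#       --username=admin --password=s3cret
#   kubectl config --kubeconfig=/home/ubuntu/config set-context juju-context \
#       --cluster=juju-cluster --user=admin
#   kubectl config --kubeconfig=/home/ubuntu/config use-context juju-context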
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
# Remove the range from the cidr.
ip = service_cidr().split('/')[0]
# Take the last octet off the IP address and replace it with 10.
return '.'.join(ip.split('.')[0:-1]) + '.10'
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
# Remove the range from the cidr.
ip = service_cidr().split('/')[0]
# Remove the last octet and replace it with 1.
return '.'.join(ip.split('.')[0:-1]) + '.1'
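# Worked example (hypothetical CIDR): with service_cidr() == '10.152.183.0/24',
# get_dns_ip() returns '10.152.183.10' and get_kubernetes_service_ip() returns
# '10.152.183.1'.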
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
# Never use stale data, always prefer what's coming in during context
# building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
    '''Create the htaccess-style basic_auth.csv file for basic authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
return None
tokens_fname = os.path.join(root_cdk, csv_fname)
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
    ''' Store a token in unitdata so it can be recalled later under save_salt.
    param: password - the password/token to be stored
    param: save_salt - the key under which the token is stored.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
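# Illustrative note, not part of the original charm: `kube-apiserver --version`
# typically prints something like "Kubernetes v1.7.4"; the regex above pulls
# out ['1', '7', '4'], so apiserverVersion() returns (1, 7, 4), matching tuple
# comparisons such as `get_version('kube-apiserver') < (1, 6)` used earlier in
# this file.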
|
bgrant0607/kubernetes
|
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
|
Python
|
apache-2.0
| 36,298
|
[
"CDK"
] |
91255537c68f49129eb918da951d2e4bc4562ae180fbce5362b5cf6e1b4076bf
|
#!/usr/bin/env python
"""
Sensor handler for color ball detection
"""
from socket import *
import sys, struct, time, os
import array as pyarray
class sensorHandler:
def __init__(self, proj, shared_data):
"""
Opens socket, subscribes to multicast group, and calculates
local vs. Vicon clock offset (to ensure most recent data).
Hostnames and ports are read in from the robot description file.
"""
### Connect to Orca:
try:
self.COLOR_GROUP = proj.robot_data['ColorDetectionGroup'][0]
self.COLOR_PORT = int(proj.robot_data['ColorDetectionPort'][0])
        except (KeyError, ValueError):
print "(SENSOR) ERROR: Cannot find Orca network settings ('ColorDetectionGroup', 'ColorDetectionPort') in robot description file."
sys.exit(-1)
# Open up sockets
print '(SENSOR) Subscribing to Orca multicast stream...'
self.color_sock = socket(AF_INET, SOCK_DGRAM)
self.color_sock.bind(('', self.COLOR_PORT))
# Join group
group_bin = inet_pton(AF_INET, self.COLOR_GROUP)
mreq = group_bin + struct.pack('=I', INADDR_ANY)
self.color_sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
# Calculate clock offset
data = self.color_sock.recv(1500)
now = time.time()
packet_doubles = pyarray.array('d')
packet_doubles.fromstring(data)
time_stamp = packet_doubles[0] + (1e-6)*packet_doubles[1]
self.time_offset = time_stamp - now
print "(SENSOR) Detected time delay of %fsec." % self.time_offset
print "(SENSOR) OK! We've successfully connected."
self.last_update = 0
        # Initialize each sensor's ring buffer to all-off readings; 'index'
        # tracks the next write position in the buffer.
self.sensorValues = [{'name': 'wolf',
'values': [0]*6,
'index': 0,
'range': [0.5, 1.5]},
{'name': 'sheep',
'values': [0]*6,
'index': 0,
'range': [1.5, 2.5]},
{'name': 'fire',
'values': [0]*6,
'index': 0,
'range': [2.5, 3.5]}]
def readFromOrca(self):
MIN_DELAY = 0.01 # seconds
#TODO: It would be nice if we could actually just flush the socket...
now = time.time() + self.time_offset
time_stamp = 0.0
while (now-time_stamp)>MIN_DELAY:
data = self.color_sock.recv(1500)
#print "Packet size: " + str(len(data))
packet_doubles = pyarray.array('d')
packet_doubles.fromstring(data)
time_stamp = packet_doubles[1] + (1e-6)*packet_doubles[2]
value = packet_doubles[0]
#print "t = " + str(time_stamp) + ", detecting " + str(value)
return value
def getSensorValue(self, sensor_name):
"""
Return a boolean value corresponding to the state of the sensor with name ``sensor_name``
If such a sensor does not exist, returns ``None``
"""
MIN_BLOB_PERIOD = 0.1
        if sensor_name in ['fire', 'wolf', 'sheep']: # hazard is the blue ball, wolf is the red ball, fire is both balls
now = time.time()
            if (now - self.last_update) > MIN_BLOB_PERIOD:
orca_val = self.readFromOrca()
for value in self.sensorValues:
value['values'][value['index']] = (orca_val > value['range'][0] and orca_val < value['range'][1])
value['index'] += 1
if value['index'] == len(value['values']):
value['index'] = 0
self.last_update = now
for value in self.sensorValues:
if sensor_name == value['name']:
return sum(value['values']) > 1
else:
print "WARNING: Sensor %s is unknown!" % sensor_name
return None
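# Illustrative sketch, not part of the original handler: each sensor keeps a
# six-slot ring buffer of boolean detections, refreshed at most once per
# MIN_BLOB_PERIOD, and getSensorValue() reports True only when more than one
# of the buffered reads fell inside that sensor's value range. The debouncing
# idea in isolation:
#
#     readings = [False] * 6     # ring buffer of recent detections
#     index = 0                  # next write position
#
#     def record(detected):
#         global index
#         readings[index] = detected
#         index = (index + 1) % len(readings)
#
#     def is_active():
#         return sum(readings) > 1   # require at least two recent detections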
|
jadecastro/LTLMoP
|
src/lib/handlers/deprecated/sensor/colorBallSensor.py
|
Python
|
gpl-3.0
| 4,087
|
[
"ORCA"
] |
83ffcf1a3b2d84841ce6238c140c687d421536fcd0c57a863939fc0561d0b1e1
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import cbook, colors as mcolors, lines, verbose, rcParams
backend_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if Gtk.main_level() == 0:
Gtk.main()
show = Show()
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
    # Setting this as a class-level constant prevents the resulting
    # expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return True # stop event propagation
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return True # stop event propagation
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
if _debug:
print("FigureCanvasGTK3.%s" % fn_name())
print("size_allocate (%d x %d)" % (allocation.width, allocation.height))
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
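        # Prepend any held modifiers, so e.g. Ctrl plus 's' is reported as 'ctrl+s'.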
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
self._need_redraw = True
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
self._need_redraw = True
if self.get_visible() and self.get_mapped():
self.queue_draw()
            # do a synchronous draw (it's less efficient than an async draw,
            # but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
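    # Usage sketch (not from this module): the returned timer shares the
    # TimerBase API, e.g.
    #
    #     timer = fig.canvas.new_timer(interval=500)
    #     timer.add_callback(update_func)   # update_func is a user callback
    #     timer.start()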
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK3(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The Gtk.Toolbar (gtk only)
vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
window : The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
def add_widget(child, expand, fill, padding):
child.show()
self.vbox.pack_end(child, False, False, 0)
size_request = child.size_request()
return size_request.height
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarGTK3(self.toolmanager)
h += add_widget(self.statusbar, False, False, 0)
h += add_widget(Gtk.HSeparator(), False, False, 0)
if self.toolbar is not None:
self.toolbar.show()
h += add_widget(self.toolbar, False, False, 0)
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
Gtk.main_level() >= 1:
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self):
        # must be initialized after the window, drawingArea and figure
        # attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3 (self.canvas, self.window)
elif rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK3(self.toolmanager)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
        # must be initialized after the toolbar has been set up
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas)
else:
toolmanager = None
return toolmanager
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
#self.canvas.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
return filename, self.ext
class RubberbandGTK3(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self.ctx = None
def draw_rubberband(self, x0, y0, x1, y1):
# 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
# Recipe/189744'
self.ctx = self.figure.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.figure.canvas.draw()
height = self.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea = Gtk.Box()
self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
self.pack_start(self._toolarea, False, False, 0)
self._toolarea.show_all()
self._groups = {}
self._toolitems = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
tbutton = Gtk.ToggleToolButton()
else:
tbutton = Gtk.ToolButton()
tbutton.set_label(name)
if image_file is not None:
image = Gtk.Image()
image.set_from_file(image_file)
tbutton.set_icon_widget(image)
if position is None:
position = -1
self._add_button(tbutton, group, position)
signal = tbutton.connect('clicked', self._call_tool, name)
tbutton.set_tooltip_text(description)
tbutton.show_all()
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tbutton, signal))
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
toolbar = Gtk.Toolbar()
toolbar.set_style(Gtk.ToolbarStyle.ICONS)
self._toolarea.pack_start(toolbar, False, False, 0)
toolbar.show_all()
self._groups[group] = toolbar
self._groups[group].insert(button, position)
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event('%s Not in toolbar' % name, self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea.pack_start(sep, False, True, 0)
sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
def __init__(self, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
Gtk.Statusbar.__init__(self)
self._context = self.get_context_id('message')
def set_message(self, s):
self.pop(self._context)
self.push(self._context, s)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.figure.canvas.manager.window,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.figure.canvas.get_supported_filetypes(),
default_filetype=self.figure.canvas.get_default_filetype())
fc.set_current_name(self.figure.canvas.get_default_filename())
return fc
def trigger(self, *args, **kwargs):
chooser = self.get_filechooser()
fname, format_ = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(
rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
self.figure.canvas.print_figure(fname, format=format_)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class SetCursorGTK3(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def init_window(self):
if self.window:
return
self.window = Gtk.Window(title="Subplot Configuration Tool")
try:
self.window.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.window.connect('destroy', self.destroy)
toolfig = Figure(figsize=(6, 3))
canvas = self.figure.canvas.__class__(toolfig)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
self.window.set_default_size(w, h)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
self.window.show()
def destroy(self, *args):
self.window.destroy()
self.window = None
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def trigger(self, sender, event, data=None):
self.init_window()
self.window.present()
# Define the file to use as the GTk icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3
Toolbar = ToolbarGTK3
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/matplotlib/backends/backend_gtk3.py
|
Python
|
bsd-2-clause
| 33,857
|
[
"FLEUR"
] |
21868f896a7d119d3f04b0dace2e44da0f949baf08d3e958ae132b3f5d34bba4
|
""" Data Dictionary CSV Validator
@author: Victor Meyerson
"""
import os
import csv
# Header columns for data dictionary
FIELD_NAME = "Variable / Field Name"
FORM = "Form Name"
FIELD_TYPE = "Field Type"
FIELD_LABEL = "Field Label"
CHOICES = "Choices, Calculations, OR Slider Labels"
TEXT_TYPE = "Text Validation Type OR Show Slider Number"
TEXT_MIN = "Text Validation Min"
TEXT_MAX = "Text Validation Max"
HEADERS = [FIELD_NAME, FORM, FIELD_TYPE, FIELD_LABEL, CHOICES, TEXT_TYPE,
TEXT_MIN, TEXT_MAX]
class Validator(object):
"""Performs validation of a REDCap Data Dictionary.
The validation outputs a summary of the missing information from a data
dictionary that is recommended to complete for conversion to RDF.
"""
def __init__(self):
# clear internal data structures
self._warnings = {}
self._errors = {}
self.verbose = False
def process(self, dd, first_rows):
"""Runs the validation process.
Args:
dd (str): Path to the data dictionary.
first_rows (list): Variable names that should start the dictionary
(e.g. subject, arm, visit)
Returns:
None
"""
if not os.path.isfile(dd):
            raise IOError("{} file not found".format(dd))
if self.verbose:
print("Processing: {}".format(dd))
if first_rows and self.verbose:
print("Running extra check for first rows")
with open(dd) as f:
reader = csv.DictReader(f)
# check headers
self._check_headers(reader.fieldnames)
# check each row
tmp_counter = 0
for row in reader:
self._check_row(row, reader.line_num)
if tmp_counter < len(first_rows):
if row[FIELD_NAME] != first_rows[tmp_counter]:
message = "field should be '{}' found '{}'"
msg = message.format(first_rows[tmp_counter],
row[FIELD_NAME])
self._append_error(row[FIELD_NAME], msg)
tmp_counter += 1
if self.verbose:
self._print_summary()
def enable_verbose(self):
"""Sets Verbose printing to True.
Returns:
None
"""
self.verbose = True
@property
def errors(self):
"""Get reported errors.
Returns:
A dict of errors.
"""
return self._errors
@property
def warnings(self):
"""Get reported warnings.
Returns:
A dict of warnings.
"""
return self._warnings
# check functions
def _check_headers(self, headers):
for field in HEADERS:
if field not in headers:
msg = "Could not find: '{}' in the header".format(field)
self._append_error("HEADERS", msg)
def _check_row(self, row, number):
message = "form: {}, field: {}, value type: {}, line: {}"
if self.verbose:
print(message.format(row[FORM], row[FIELD_NAME],
row[FIELD_TYPE], number))
self._check_label_exists(row)
self._check_value_type(row)
def _check_label_exists(self, row):
label = row[FIELD_LABEL]
if not label:
msg = "No label is present."
self._append_warning(row[FIELD_NAME], msg)
def _check_value_type(self, row):
if row[FIELD_TYPE] == "dropdown":
self._validate_dropdown(row[FIELD_NAME], row[CHOICES])
elif row[FIELD_TYPE] == "yesno":
self._validate_yes_no(row[FIELD_NAME], row)
elif row[FIELD_TYPE] == "text":
self._validate_text(row[FIELD_NAME], row)
else:
val_type = row[FIELD_TYPE]
msg = "Skipping validation of type: '{}'".format(val_type)
self._append_warning(row[FIELD_NAME], msg)
# validate various field type functions
def _validate_dropdown(self, field, choices_str):
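        # REDCap encodes dropdown choices as 'code, label' pairs separated by
        # '|', e.g. "1, Male | 2, Female"; each pair must therefore split into
        # exactly two parts on the comma.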
choices = choices_str.split('|')
if not choices[0]:
msg = "There should be at least one choice."
self._append_error(field, msg)
for choice in choices:
breakdown = choice.split(",")
if len(breakdown) != 2:
msg = "This is an invalid choice: {}".format(choice)
self._append_error(field, msg)
def _validate_yes_no(self, field, row):
if len(row[CHOICES]) > 0:
msg = "YesNo field should not have choices"
self._append_error(field, msg)
def _validate_text(self, field, row):
if self.verbose:
print(" Item: {} is a '{}'".format(row[FIELD_NAME],
row[TEXT_TYPE]))
if row[TEXT_TYPE] == "number":
self._validate_numeric_range(field, row[TEXT_MIN], row[TEXT_MAX])
else:
msg = "No validation rules for type: '{}'".format(row[TEXT_TYPE])
self._append_warning(field, msg)
def _validate_numeric_range(self, field, low_str, high_str):
if self.verbose:
print(" Range: [{},{}]".format(low_str, high_str))
# Parse into None or float.
if low_str:
low = float(low_str)
else:
low = None
if high_str:
high = float(high_str)
else:
high = None
# Check for range issues.
if type(high) is float and type(low) is float:
if high < low:
msg = "Max value ({}) should not be less than min value ({})."
self._append_error(field, msg.format(high_str, low_str))
elif type(high) is float:
if not low:
msg = "No minimum value set."
self._append_warning(field, msg)
elif type(low) is float:
if not high:
msg = "No maximum value set."
self._append_warning(field, msg)
else:
msg = "No maximum or minimum value set."
self._append_warning(field, msg)
# accumulate messages
def _append_error(self, key, msg):
if key not in self._errors:
self._errors[key] = [msg]
else:
self._errors[key].append(msg)
def _append_warning(self, key, msg):
if key not in self._warnings:
self._warnings[key] = [msg]
else:
self._warnings[key].append(msg)
# print helpers
def _print_summary(self):
print("SUMMARY")
print("-------")
message = "There are {} error(s)."
print(message.format(len(self._errors)))
self._print_details(self._errors)
print("")
message = "There are {} warning(s)."
print(message.format(len(self._warnings)))
self._print_details(self._warnings)
def _print_details(self, ds):
for field, msgs in ds.items():
print("Field: '{}'".format(field))
for msg in msgs:
print(" {}".format(msg))
def csv_to_list(arg):
    return list(map(str, arg.split(',')))
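# Minimal usage sketch; the CSV path and first-row names below are
# hypothetical and not part of the original module:
if __name__ == "__main__":
    validator = Validator()
    validator.enable_verbose()
    validator.process("DataDictionary.csv", ["subject", "arm", "visit"])
    for field, messages in validator.errors.items():
        for message in messages:
            print("ERROR {}: {}".format(field, message))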
|
victorSRI/redcap_rdf
|
redcap_rdf/datadict_validator.py
|
Python
|
bsd-3-clause
| 7,209
|
[
"VisIt"
] |
ae00434ddd60f4575198d2b27e7e4045f0520d4e559d7b899d8ea20a1892f0d1
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# Eric Chang <ericchang2017@u.northwestern.edu>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from scipy.special import boxcox
from ..base import (
BaseEstimator,
TransformerMixin,
_OneToOneFeatureMixin,
_ClassNamePrefixFeaturesOutMixin,
)
from ..utils import check_array
from ..utils.extmath import _incremental_mean_and_var, row_norms
from ..utils.sparsefuncs_fast import (
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
)
from ..utils.sparsefuncs import (
inplace_column_scale,
mean_variance_axis,
incr_mean_variance_axis,
min_max_axis,
)
from ..utils.validation import (
check_is_fitted,
check_random_state,
_check_sample_weight,
FLOAT_DTYPES,
)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
__all__ = [
"Binarizer",
"KernelCenterer",
"MinMaxScaler",
"MaxAbsScaler",
"Normalizer",
"OneHotEncoder",
"RobustScaler",
"StandardScaler",
"QuantileTransformer",
"PowerTransformer",
"add_dummy_feature",
"binarize",
"normalize",
"scale",
"robust_scale",
"maxabs_scale",
"minmax_scale",
"quantile_transform",
"power_transform",
]
def _is_constant_feature(var, mean, n_samples):
"""Detect if a feature is indistinguishable from a constant feature.
The detection is based on its computed variance and on the theoretical
error bounds of the '2 pass algorithm' for variance computation.
See "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
"""
# In scikit-learn, variance is always computed using float64 accumulators.
eps = np.finfo(np.float64).eps
upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2
return var <= upper_bound
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
"""Set scales of near constant features to 1.
The goal is to avoid division by very small or zero values.
Near constant features are detected automatically by identifying
scales close to machine precision unless they are precomputed by
the caller and passed with the `constant_mask` kwarg.
Typically for standard scaling, the scales are the standard
deviation while near constant features are better detected on the
computed variances which are closer to machine precision by
construction.
"""
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0.0:
scale = 1.0
return scale
elif isinstance(scale, np.ndarray):
if constant_mask is None:
# Detect near constant values to avoid dividing by a very small
# value that could lead to surprising results and numerical
# stability issues.
constant_mask = scale < 10 * np.finfo(scale.dtype).eps
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[constant_mask] = 1.0
return scale
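# Illustrative behaviour of the helper above (example added for clarity, not
# taken from the library): zero or near-zero entries are replaced by 1.0 so
# the subsequent division is a no-op for (near) constant features, e.g.
#
#     _handle_zeros_in_scale(np.array([1., 0., 2.]))   # -> array([1., 1., 2.])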
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis.
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : bool, default=True
If True, center the data before scaling.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : bool, default=True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.StandardScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
See Also
--------
StandardScaler : Performs scaling to unit variance using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(
X,
accept_sparse="csc",
copy=copy,
ensure_2d=False,
estimator="the scale function",
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives."
)
if axis != 0:
raise ValueError(
"Can only scale sparse matrix on axis=0, got axis=%d" % axis
)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn(
"Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features."
)
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn(
"Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. "
)
Xr -= mean_2
return X
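# Small worked example for the function above (illustrative, not from the
# library): each column is centred and divided by its biased standard
# deviation, so
#
#     scale(np.array([[1., 1.], [2., 3.]]))
#
# returns [[-1., -1.], [1., 1.]] (column means 1.5 and 2.0, stds 0.5 and 1.0).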
class MinMaxScaler(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
clip : bool, default=False
Set to True to clip transformed values of held-out data to
provided `feature range`.
.. versionadded:: 0.24
Attributes
----------
min_ : ndarray of shape (n_features,)
Per feature adjustment for minimum. Equivalent to
``min - X.min(axis=0) * self.scale_``
scale_ : ndarray of shape (n_features,)
Per feature relative scaling of the data. Equivalent to
``(max - min) / (X.max(axis=0) - X.min(axis=0))``
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray of shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray of shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray of shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_samples_seen_ : int
The number of samples processed by the estimator.
It will be reset on new calls to fit, but increments across
``partial_fit`` calls.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
minmax_scale : Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler()
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
"""
def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):
self.feature_range = feature_range
self.copy = copy
self.clip = clip
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError(
"Minimum of desired feature range must be smaller than maximum. Got %s."
% str(feature_range)
)
if sparse.issparse(X):
raise TypeError(
"MinMaxScaler does not support sparse input. "
"Consider using MaxAbsScaler instead."
)
first_pass = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
reset=first_pass,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(
data_range, copy=True
)
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scale features of X according to feature_range.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data that will be transformed.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
X = self._validate_data(
X,
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
reset=False,
)
X *= self.scale_
X += self.min_
if self.clip:
np.clip(X, self.feature_range[0], self.feature_range[1], out=X)
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data that will be transformed. It cannot be sparse.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
X = check_array(
X, copy=self.copy, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
)
X -= self.min_
X /= self.scale_
return X
def _more_tags(self):
return {"allow_nan": True}
def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):
"""Transform features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by (when ``axis=0``)::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
The transformation is calculated as (when ``axis=0``)::
X_scaled = scale * X + min - X.min(axis=0) * scale
where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`~sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int, default=0
Axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Returns
-------
X_tr : ndarray of shape (n_samples, n_features)
The transformed data.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.MinMaxScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`.
See Also
--------
MinMaxScaler : Performs scaling to a given range using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(
X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
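# Quick check of the function above (illustrative, not from the library): 1d
# input is scaled feature-wise into the requested range, e.g.
#
#     minmax_scale(np.array([1., 2., 3.]))   # -> array([0. , 0.5, 1. ])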
class StandardScaler(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Standardize features by removing the mean and scaling to unit variance.
The standard score of a sample `x` is calculated as:
z = (x - u) / s
where `u` is the mean of the training samples or zero if `with_mean=False`,
and `s` is the standard deviation of the training samples or one if
`with_std=False`.
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using
:meth:`transform`.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : bool, default=True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : bool, default=True
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray of shape (n_features,) or None
Per feature relative scaling of the data to achieve zero mean and unit
variance. Generally this is calculated using `np.sqrt(var_)`. If a
variance is zero, we can't achieve unit variance, and the data is left
as-is, giving a scaling factor of 1. `scale_` is equal to `None`
when `with_std=False`.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray of shape (n_features,) or None
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray of shape (n_features,) or None
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_seen_ : int or ndarray of shape (n_features,)
The number of samples processed by the estimator for each feature.
If there are no missing samples, the ``n_samples_seen`` will be an
integer, otherwise it will be an array of dtype int. If
`sample_weights` are used it will be a float (if no missing data)
or an array of dtype float that sums the weights seen so far.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
See Also
--------
scale : Equivalent function without the estimator API.
:class:`~sklearn.decomposition.PCA` : Further removes the linear
correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler()
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
"""
def __init__(self, *, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None, sample_weight=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y, sample_weight)
def partial_fit(self, X, y=None, sample_weight=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to a very large number of
`n_samples` or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
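Examples
--------
A minimal sketch of incremental fitting on two mini-batches with the
default parameters; the running statistics match a single fit on the
concatenated data:
>>> from sklearn.preprocessing import StandardScaler
>>> scaler = StandardScaler()
>>> scaler = scaler.partial_fit([[0, 0], [0, 0]])
>>> scaler = scaler.partial_fit([[1, 1], [1, 1]])
>>> scaler.mean_
array([0.5, 0.5])
>>> scaler.var_
array([0.25, 0.25])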
"""
first_call = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
reset=first_call,
)
n_features = X.shape[1]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
# if n_samples_seen_ is an integer (i.e. no missing values), we need to
# transform it to a NumPy array of shape (n_features,) required by
# incr_mean_variance_axis and _incremental_variance_axis
dtype = np.int64 if sample_weight is None else X.dtype
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)
elif np.size(self.n_samples_seen_) == 1:
self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1])
self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
sparse_constructor = (
sparse.csr_matrix if X.format == "csr" else sparse.csc_matrix
)
if self.with_std:
# First pass
if not hasattr(self, "scale_"):
self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis(
X, axis=0, weights=sample_weight, return_sum_weights=True
)
# Next passes
else:
(
self.mean_,
self.var_,
self.n_samples_seen_,
) = incr_mean_variance_axis(
X,
axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_,
weights=sample_weight,
)
# We force the mean and variance to float64 for large arrays
# See https://github.com/scikit-learn/scikit-learn/pull/12338
self.mean_ = self.mean_.astype(np.float64, copy=False)
self.var_ = self.var_.astype(np.float64, copy=False)
else:
self.mean_ = None # as with_mean must be False for sparse
self.var_ = None
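# Count the (possibly weighted) number of non-NaN samples per feature:
# `sum_weights_nan` accumulates, for each column, the total weight of the
# explicitly stored NaN entries.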
weights = _check_sample_weight(sample_weight, X)
sum_weights_nan = weights @ sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr), shape=X.shape
)
self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype(
dtype
)
else:
# First pass
if not hasattr(self, "scale_"):
self.mean_ = 0.0
if self.with_std:
self.var_ = 0.0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var(
X,
self.mean_,
self.var_,
self.n_samples_seen_,
sample_weight=sample_weight,
)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
# Extract the list of near constant features on the raw variances,
# before taking the square root.
constant_mask = _is_constant_feature(
self.var_, self.mean_, self.n_samples_seen_
)
self.scale_ = _handle_zeros_in_scale(
np.sqrt(self.var_), copy=False, constant_mask=constant_mask
)
else:
self.scale_ = None
return self
def transform(self, X, copy=None):
"""Perform standardization by centering and scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
copy = copy if copy is not None else self.copy
X = self._validate_data(
X,
reset=False,
accept_sparse="csr",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
copy = copy if copy is not None else self.copy
X = check_array(
X,
accept_sparse="csr",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives."
)
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
def _more_tags(self):
return {"allow_nan": True, "preserves_dtype": [np.float64, np.float32]}
class MaxAbsScaler(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray of shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray of shape (n_features,)
Per feature maximum absolute value.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See Also
--------
maxabs_scale : Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler()
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
"""
def __init__(self, *, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to a very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
first_pass = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
reset=first_pass,
accept_sparse=("csr", "csc"),
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs, copy=True)
return self
def transform(self, X):
"""Scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be scaled.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
reset=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be transformed back.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def _more_tags(self):
return {"allow_nan": True}
def maxabs_scale(X, *, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
axis : int, default=0
Axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.MaxAbsScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`.
See Also
--------
MaxAbsScaler : Performs scaling to the [-1, 1] range using
the Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
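Examples
--------
A minimal sketch mirroring the :class:`MaxAbsScaler` example above
(default ``axis=0``, so each column is divided by its maximum absolute value):
>>> from sklearn.preprocessing import maxabs_scale
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> maxabs_scale(X)
array([[ 0.5, -1. ,  1. ],
       [ 1. ,  0. ,  0. ],
       [ 0. ,  1. , -0.5]])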
"""
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=False,
ensure_2d=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th percentile)
and the 3rd quartile (75th percentile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the :meth:`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : bool, default=True
If `True`, center the data before scaling.
This will cause :meth:`transform` to raise an exception when attempted
on sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : bool, default=True
If `True`, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \
default=(25.0, 75.0)
Quantile range used to calculate `scale_`. By default this is equal to
the IQR, i.e., `q_min` is the first quartile and `q_max` is the third
quartile.
.. versionadded:: 0.18
copy : bool, default=True
If `False`, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
unit_variance : bool, default=False
If `True`, scale data so that normally distributed features have a
variance of 1. In general, if the difference between the x-values of
`q_max` and `q_min` for a standard normal distribution is greater
than 1, the dataset will be scaled down. If less than 1, the dataset
will be scaled up.
.. versionadded:: 0.24
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
robust_scale : Equivalent function without the estimator API.
sklearn.decomposition.PCA : Further removes the linear correlation across
features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler()
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
"""
def __init__(
self,
*,
with_centering=True,
with_scaling=True,
quantile_range=(25.0, 75.0),
copy=True,
unit_variance=False,
):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.unit_variance = unit_variance
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the median and quantiles
used for later scaling along the features axis.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted scaler.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = self._validate_data(
X,
accept_sparse="csc",
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" % str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives."
)
self.center_ = np.nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[
X.indptr[feature_idx] : X.indptr[feature_idx + 1]
]
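# Implicit zeros must participate in the quantile computation:
# pad the explicitly stored values with zeros up to the full
# column length before taking the percentiles.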
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(np.nanpercentile(column_data, self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
if self.unit_variance:
adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0)
self.scale_ = self.scale_ / adjust
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the specified axis.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
reset=False,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The rescaled data to be transformed back.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def _more_tags(self):
return {"allow_nan": True}
def robust_scale(
X,
*,
axis=0,
with_centering=True,
with_scaling=True,
quantile_range=(25.0, 75.0),
copy=True,
unit_variance=False,
):
"""Standardize a dataset along any axis.
Center to the median and scale component-wise
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
Axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : bool, default=True
If `True`, center the data before scaling.
with_scaling : bool, default=True
If `True`, scale the data to the quantile range (by default, the
interquartile range).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0,\
default=(25.0, 75.0)
Quantile range used to calculate `scale_`. By default this is equal to
the IQR, i.e., `q_min` is the first quartile and `q_max` is the third
quartile.
.. versionadded:: 0.18
copy : bool, default=True
Set to `False` to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
unit_variance : bool, default=False
If `True`, scale data so that normally distributed features have a
variance of 1. In general, if the difference between the x-values of
`q_max` and `q_min` for a standard normal distribution is greater
than 1, the dataset will be scaled down. If less than 1, the dataset
will be scaled up.
.. versionadded:: 0.24
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected either to set `with_centering=False`
explicitly (in that case, only quantile-range scaling will be performed
on the features of the CSR matrix) or to call `X.toarray()` if the
materialized dense array is expected to fit in memory.
To avoid a memory copy, the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.RobustScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`.
See Also
--------
RobustScaler : Performs centering and scaling using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
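Examples
--------
A minimal sketch mirroring the :class:`RobustScaler` example above
(default ``axis=0``, centering on the median and scaling by the IQR):
>>> from sklearn.preprocessing import robust_scale
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> robust_scale(X)
array([[ 0. , -2. ,  0. ],
       [-1. ,  0. ,  0.4],
       [ 1. ,  0. , -1.6]])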
"""
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=False,
ensure_2d=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
unit_variance=unit_variance,
copy=copy,
)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : {0, 1}, default=1
Axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : bool, default=False
Whether to return the computed norms.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See Also
--------
Normalizer : Performs normalization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
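Examples
--------
A minimal sketch mirroring the :class:`Normalizer` example below
(default ``norm='l2'`` and ``axis=1``, i.e. each row is scaled to unit
Euclidean norm):
>>> from sklearn.preprocessing import normalize
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> normalize(X)
array([[0.8, 0.2, 0.4, 0.4],
       [0.1, 0.3, 0.9, 0.3],
       [0.5, 0.7, 0.5, 0.1]])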
"""
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = "csc"
elif axis == 1:
sparse_format = "csr"
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(
X,
accept_sparse=sparse_format,
copy=copy,
estimator="the normalize function",
dtype=FLOAT_DTYPES,
)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
inplace_csr_row_normalize_l1(X)
elif norm == "l2":
inplace_csr_row_normalize_l2(X)
elif norm == "max":
mins, maxes = min_max_axis(X, 1)
norms = np.maximum(abs(mins), maxes)
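# Repeat each row norm once per stored entry of that row so the
# division can be applied to X.data in place.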
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == "l1":
norms = np.abs(X).sum(axis=1)
elif norm == "l2":
norms = row_norms(X)
elif norm == "max":
norms = np.max(abs(X), axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
class Normalizer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1, l2 or inf) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering, for instance: the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample. If norm='max'
is used, values will be rescaled by the maximum of the absolute
values.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
normalize : Equivalent function without the estimator API.
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer()
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
"""
def __init__(self, norm="l2", *, copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to estimate the normalization parameters.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_data(X, accept_sparse="csr")
return self
def transform(self, X, copy=None):
"""Scale each non zero row of X to unit norm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
X = self._validate_data(X, accept_sparse="csr", reset=False)
return normalize(X, norm=self.norm, axis=1, copy=copy)
def _more_tags(self):
return {"stateless": True}
def binarize(X, *, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix.
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
Binarizer : Performs binarization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
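Examples
--------
A minimal sketch mirroring the :class:`Binarizer` example below (default
``threshold=0.0``, so only strictly positive values map to 1):
>>> from sklearn.preprocessing import binarize
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> binarize(X)
array([[1., 0., 1.],
       [1., 0., 0.],
       [0., 1., 0.]])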
"""
X = check_array(X, accept_sparse=["csr", "csc"], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError("Cannot binarize a sparse matrix with threshold < 0")
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Binarize data (set feature values to 0 or 1) according to a threshold.
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
Set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
binarize : Equivalent function without the estimator API.
KBinsDiscretizer : Bin continuous data into intervals.
OneHotEncoder : Encode categorical features as a one-hot numeric array.
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
Examples
--------
>>> from sklearn.preprocessing import Binarizer
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = Binarizer().fit(X) # fit does nothing.
>>> transformer
Binarizer()
>>> transformer.transform(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
"""
def __init__(self, *, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_data(X, accept_sparse="csr")
return self
def transform(self, X, copy=None):
"""Binarize each element of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
# TODO: This should be refactored because binarize also calls
# check_array
X = self._validate_data(X, accept_sparse=["csr", "csc"], copy=copy, reset=False)
return binarize(X, threshold=self.threshold, copy=False)
def _more_tags(self):
return {"stateless": True}
class KernelCenterer(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
r"""Center an arbitrary kernel matrix :math:`K`.
Let us define a kernel :math:`K` such that:
.. math::
K(X, Y) = \phi(X) . \phi(Y)^{T}
:math:`\phi(X)` is a function mapping rows of :math:`X` to a
Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`.
This class allows computing :math:`\tilde{K}(X, Y)` such that:
.. math::
\tilde{K}(X, Y) = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T}
:math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert
space.
`KernelCenterer` centers the features without explicitly computing the
mapping :math:`\phi(\cdot)`. Working with centered kernels is sometimes
expected in algebraic computations such as the eigendecomposition
performed in :class:`~sklearn.decomposition.KernelPCA`, for instance.
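In terms of the kernel matrix itself this is the usual double-centering
operation (a sketch, writing :math:`1_n` for the `(n_samples, n_samples)`
matrix whose entries all equal `1 / n_samples`):
.. math::
\tilde{K} = K - 1_n K - K 1_n + 1_n K 1_n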
Read more in the :ref:`User Guide <kernel_centering>`.
Attributes
----------
K_fit_rows_ : ndarray of shape (n_samples,)
Average of each column of kernel matrix.
K_fit_all_ : float
Average of kernel matrix.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.kernel_approximation.Nystroem : Approximate a kernel map
using a subset of the training data.
References
----------
.. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
"Nonlinear component analysis as a kernel eigenvalue problem."
Neural computation 10.5 (1998): 1299-1319.
<https://www.mlpack.org/papers/kpca.pdf>`_
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
def __init__(self):
# Needed for backported inspect.signature compatibility with PyPy
pass
def fit(self, K, y=None):
"""Fit KernelCenterer.
Parameters
----------
K : ndarray of shape (n_samples, n_samples)
Kernel matrix.
y : None
Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
K = self._validate_data(K, dtype=FLOAT_DTYPES)
if K.shape[0] != K.shape[1]:
raise ValueError(
"Kernel matrix must be a square matrix."
" Input is a {}x{} matrix.".format(K.shape[0], K.shape[1])
)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, copy=True):
"""Center kernel matrix.
Parameters
----------
K : ndarray of shape (n_samples1, n_samples2)
Kernel matrix.
copy : bool, default=True
Set to False to perform inplace computation.
Returns
-------
K_new : ndarray of shape (n_samples1, n_samples2)
The centered kernel matrix.
"""
check_is_fitted(self)
K = self._validate_data(K, copy=copy, dtype=FLOAT_DTYPES, reset=False)
K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _n_features_out(self):
"""Number of transformed output features."""
# Used by _ClassNamePrefixFeaturesOutMixin. This model preserves the
# number of input features but this is not a one-to-one mapping in the
# usual sense. Hence the choice not to use _OneToOneFeatureMixin to
# implement get_feature_names_out for this class.
return self.n_features_in_
def _more_tags(self):
return {"pairwise": True}
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
class QuantileTransformer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.19
Parameters
----------
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, default=1e5
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
n_quantiles_ : int
The actual number of quantiles used to discretize the cumulative
distribution function.
quantiles_ : ndarray of shape (n_quantiles, n_features)
The values corresponding to the quantiles of reference.
references_ : ndarray of shape (n_quantiles, )
Quantiles of references.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X)
array([...])
"""
def __init__(
self,
*,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True,
):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn(
"'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect."
)
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(
n_samples, size=self.subsample, replace=False
)
col = col.take(subsample_idx, mode="clip")
self.quantiles_.append(np.nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
# Due to floating-point precision error in `np.nanpercentile`,
# make sure that quantiles are monotonically increasing.
# Upstream issue in numpy:
# https://github.com/numpy/numpy/issues/14685
self.quantiles_ = np.maximum.accumulate(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative. If a sparse matrix is provided,
it will be converted into a sparse ``csc_matrix``.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
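# Subsample the explicitly stored values in proportion to the
# column density, so that the implicit zeros keep their share of
# the (at most `subsample`) retained samples.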
column_subsample = self.subsample * len(column_nnz_data) // n_samples
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample, dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False
)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(np.nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
# due to floating-point precision error in `np.nanpercentile`,
# make sure the quantiles are monotonically increasing
# Upstream issue in numpy:
# https://github.com/numpy/numpy/issues/14685
self.quantiles_ = np.maximum.accumulate(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
if self.n_quantiles <= 0:
raise ValueError(
"Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles
)
if self.subsample <= 0:
raise ValueError(
"Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample
)
if self.n_quantiles > self.subsample:
raise ValueError(
"The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles, self.subsample)
)
X = self._check_inputs(X, in_fit=True, copy=False)
n_samples = X.shape[0]
if self.n_quantiles > n_samples:
warnings.warn(
"n_quantiles (%s) is greater than the total number "
"of samples (%s). n_quantiles is set to "
"n_samples." % (self.n_quantiles, n_samples)
)
self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature."""
output_distribution = self.output_distribution
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform distribution
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.cdf(X_col)
# else output distribution is already a uniform distribution
# find index for lower and higher bounds
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x
upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x
if output_distribution == "uniform":
lower_bounds_idx = X_col == lower_bound_x
upper_bounds_idx = X_col == upper_bound_x
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = 0.5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1])
)
else:
X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output distribution
if not inverse:
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))
clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
# else output distribution is uniform and the ppf is the
# identity function so we let X_col unchanged
return X_col
def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):
"""Check inputs before fit and transform."""
X = self._validate_data(
X,
reset=in_fit,
accept_sparse="csc",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
# we only accept positive sparse matrix when ignore_implicit_zeros is
# false and that we call fit or transform.
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if (
not accept_sparse_negative
and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))
):
raise ValueError(
"QuantileTransformer only accepts non-negative sparse matrices."
)
# check the output distribution
if self.output_distribution not in ("normal", "uniform"):
raise ValueError(
"'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(self.output_distribution)
)
return X
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, default=False
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray of shape (n_samples, n_features)
Projected data.
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx], inverse
)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx], inverse
)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(X, in_fit=False, copy=self.copy)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(
X, in_fit=False, accept_sparse_negative=True, copy=self.copy
)
return self._transform(X, inverse=True)
def _more_tags(self):
return {"allow_nan": True}
def quantile_transform(
X,
*,
axis=0,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True,
):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to transform.
axis : int, default=0
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, default=1e5
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged
        .. versionchanged:: 0.23
The default value of `copy` changed from False to True in 0.23.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
See Also
--------
QuantileTransformer : Performs quantile-based scaling using the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
you know what you are doing. A common mistake is to apply it
to the entire data *before* splitting into training and
test sets. This will bias the model evaluation because
information would have leaked from the test set to the
training set.
In general, we recommend using
:class:`~sklearn.preprocessing.QuantileTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking, e.g.: `pipe = make_pipeline(QuantileTransformer(),
LogisticRegression())`.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(
n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy,
)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError(
"axis should be either equal to 0 or 1. Got axis={}".format(axis)
)
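# Illustrative sketch, not part of scikit-learn: a hedged demo of the mapping
# described in the docstring above -- a skewed feature is sent through the
# empirical CDF to a uniform output, or through the normal quantile function
# (stats.norm.ppf) to an approximately standard-normal output. The `_demo_`
# name is an addition for illustration only.
def _demo_quantile_transform_outputs():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.lognormal(size=(1000, 1))  # heavily right-skewed feature
    X_uniform = quantile_transform(X, n_quantiles=100, copy=True)
    X_normal = quantile_transform(
        X, n_quantiles=100, output_distribution="normal", copy=True
    )
    # X_uniform lies in [0, 1]; X_normal is roughly zero-mean, unit-variance.
    return X_uniform, X_normal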
class PowerTransformer(_OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, PowerTransformer supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive or negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.20
Parameters
----------
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Attributes
----------
lambdas_ : ndarray of float of shape (n_features,)
The parameters of the power transformation for the selected features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
power_transform : Equivalent function without the estimator API.
QuantileTransformer : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import PowerTransformer
>>> pt = PowerTransformer()
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(pt.fit(data))
PowerTransformer()
>>> print(pt.lambdas_)
[ 1.386... -3.100...]
>>> print(pt.transform(data))
[[-1.316... -0.707...]
[ 0.209... -0.707...]
[ 1.106... 1.414...]]
"""
def __init__(self, method="yeo-johnson", *, standardize=True, copy=True):
self.method = method
self.standardize = standardize
self.copy = copy
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._fit(X, y=y, force_transform=False)
return self
def fit_transform(self, X, y=None):
"""Fit `PowerTransformer` to `X`, then transform `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters
and to be transformed using a power transformation.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
Transformed data.
"""
return self._fit(X, y, force_transform=True)
def _fit(self, X, y=None, force_transform=False):
X = self._check_input(X, in_fit=True, check_positive=True, check_method=True)
if not self.copy and not force_transform: # if call from fit()
X = X.copy() # force copy so that fit does not change X inplace
optim_function = {
"box-cox": self._box_cox_optimize,
"yeo-johnson": self._yeo_johnson_optimize,
}[self.method]
with np.errstate(invalid="ignore"): # hide NaN warnings
self.lambdas_ = np.array([optim_function(col) for col in X.T])
if self.standardize or force_transform:
transform_function = {
"box-cox": boxcox,
"yeo-johnson": self._yeo_johnson_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
return X
def transform(self, X):
"""Apply the power transform to each feature using the fitted lambdas.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
"""
check_is_fitted(self)
X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True)
transform_function = {
"box-cox": boxcox,
"yeo-johnson": self._yeo_johnson_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
X = self._scaler.transform(X)
return X
def inverse_transform(self, X):
"""Apply the inverse power transformation using the fitted lambdas.
The inverse of the Box-Cox transformation is given by::
if lambda_ == 0:
X = exp(X_trans)
else:
X = (X_trans * lambda_ + 1) ** (1 / lambda_)
The inverse of the Yeo-Johnson transformation is given by::
if X >= 0 and lambda_ == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda_ != 0:
X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1
elif X < 0 and lambda_ != 2:
X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))
elif X < 0 and lambda_ == 2:
X = 1 - exp(-X_trans)
Parameters
----------
X : array-like of shape (n_samples, n_features)
The transformed data.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The original data.
"""
check_is_fitted(self)
X = self._check_input(X, in_fit=False, check_shape=True)
if self.standardize:
X = self._scaler.inverse_transform(X)
inv_fun = {
"box-cox": self._box_cox_inverse_tranform,
"yeo-johnson": self._yeo_johnson_inverse_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = inv_fun(X[:, i], lmbda)
return X
    def _box_cox_inverse_transform(self, x, lmbda):
"""Return inverse-transformed input x following Box-Cox inverse
transform with parameter lambda.
"""
if lmbda == 0:
x_inv = np.exp(x)
else:
x_inv = (x * lmbda + 1) ** (1 / lmbda)
return x_inv
def _yeo_johnson_inverse_transform(self, x, lmbda):
"""Return inverse-transformed input x following Yeo-Johnson inverse
transform with parameter lambda.
"""
x_inv = np.zeros_like(x)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.0):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.0):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
def _yeo_johnson_transform(self, x, lmbda):
"""Return transformed input x following Yeo-Johnson transform with
parameter lambda.
"""
out = np.zeros_like(x)
pos = x >= 0 # binary mask
# when x >= 0
if abs(lmbda) < np.spacing(1.0):
out[pos] = np.log1p(x[pos])
else: # lmbda != 0
out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
# when x < 0
if abs(lmbda - 2) > np.spacing(1.0):
out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
else: # lmbda == 2
out[~pos] = -np.log1p(-x[~pos])
return out
def _box_cox_optimize(self, x):
"""Find and return optimal lambda parameter of the Box-Cox transform by
MLE, for observed data x.
        Here we use the scipy builtin, which uses the Brent optimizer.
"""
# the computation of lambda is influenced by NaNs so we need to
# get rid of them
_, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)
return lmbda
def _yeo_johnson_optimize(self, x):
"""Find and return optimal lambda parameter of the Yeo-Johnson
transform by MLE, for observed data x.
Like for Box-Cox, MLE is done via the brent optimizer.
"""
def _neg_log_likelihood(lmbda):
"""Return the negative log likelihood of the observed data x as a
function of lambda."""
x_trans = self._yeo_johnson_transform(x, lmbda)
n_samples = x.shape[0]
loglike = -n_samples / 2 * np.log(x_trans.var())
loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()
return -loglike
# the computation of lambda is influenced by NaNs so we need to
# get rid of them
x = x[~np.isnan(x)]
# choosing bracket -2, 2 like for boxcox
return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
def _check_input(
self, X, in_fit, check_positive=False, check_shape=False, check_method=False
):
"""Validate the input before fit and transform.
Parameters
----------
X : array-like of shape (n_samples, n_features)
in_fit : bool
Whether or not `_check_input` is called from `fit` or other
methods, e.g. `predict`, `transform`, etc.
check_positive : bool, default=False
If True, check that all data is positive and non-zero (only if
``self.method=='box-cox'``).
check_shape : bool, default=False
If True, check that n_features matches the length of self.lambdas_
check_method : bool, default=False
If True, check that the transformation method is valid.
"""
X = self._validate_data(
X,
ensure_2d=True,
dtype=FLOAT_DTYPES,
copy=self.copy,
force_all_finite="allow-nan",
reset=in_fit,
)
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
if check_positive and self.method == "box-cox" and np.nanmin(X) <= 0:
raise ValueError(
"The Box-Cox transformation can only be "
"applied to strictly positive data"
)
if check_shape and not X.shape[1] == len(self.lambdas_):
raise ValueError(
"Input data has a different number of features "
"than fitting data. Should have {n}, data has {m}".format(
n=len(self.lambdas_), m=X.shape[1]
)
)
valid_methods = ("box-cox", "yeo-johnson")
if check_method and self.method not in valid_methods:
raise ValueError(
"'method' must be one of {}, got {} instead.".format(
valid_methods, self.method
)
)
return X
def _more_tags(self):
return {"allow_nan": True}
def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
"""
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, power_transform supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive or negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
.. versionchanged:: 0.23
The default value of the `method` parameter changed from
'box-cox' to 'yeo-johnson' in 0.23.
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import power_transform
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(power_transform(data, method='box-cox'))
[[-1.332... -0.707...]
[ 0.256... -0.707...]
[ 1.076... 1.414...]]
.. warning:: Risk of data leak.
Do not use :func:`~sklearn.preprocessing.power_transform` unless you
know what you are doing. A common mistake is to apply it to the entire
data *before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.PowerTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),
LogisticRegression())`.
See Also
--------
PowerTransformer : Equivalent transformation with the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
quantile_transform : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
"""
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
return pt.fit_transform(X)
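# Illustrative sketch, not part of scikit-learn: a hedged round-trip check of
# the transformation described above. Strictly positive data is transformed
# with both methods via the estimator API so the fitted lambdas can be reused
# for the inverse. The `_demo_` name is an addition for illustration only.
def _demo_power_transform_roundtrip():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.exponential(size=(100, 2)) + 1e-3  # strictly positive columns
    for method in ("box-cox", "yeo-johnson"):
        pt = PowerTransformer(method=method)
        X_trans = pt.fit_transform(X)   # standardized, so roughly N(0, 1)
        X_back = pt.inverse_transform(X_trans)
        # the inverse recovers the input up to floating-point rounding
        assert np.allclose(X, X_back)
    return X_trans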
|
manhhomienbienthuy/scikit-learn
|
sklearn/preprocessing/_data.py
|
Python
|
bsd-3-clause
| 118,565
|
[
"Gaussian"
] |
f6dc6a8ab5835ebcfbf62854aa1cceb4abddb51e8fc3fb9745dce55427671554
|
#!/usr/bin/env python
''' buildCodeDOC
It accepts as argument the DIRAC version ( or branch name )
'''
# defined on DIRACDocs/source/Tools/fakeEnvironment
import fakeEnvironment
import DIRAC
import os
import pkgutil
import sys
import tempfile
DOCMODULES = [ 'API', 'Client', 'Service', 'Utilities' ]
def getTmpDir():
''' Creates a temporary dir and adds it to sys.path so that we can import
whatever lies there.
'''
try:
tmpDir = tempfile.mkdtemp()
except IOError:
sys.exit( 'IOError creating tmp dir' )
sys.path.append( tmpDir )
return tmpDir
#...............................................................................
# Functions generating rst files
def getCodeDocumentationPath():
whereAmI = os.path.dirname( os.path.abspath( __file__ ) )
relativePathToWrite = '../source/CodeDocumentation'
codeDocumentationPath = os.path.abspath( os.path.join( whereAmI,
relativePathToWrite ) )
try:
os.mkdir( codeDocumentationPath )
except OSError:
sys.exit( 'Cannot create %s' % codeDocumentationPath )
return codeDocumentationPath
def getDIRACPackages():
pkgpath = os.path.dirname( DIRAC.__file__ )
packages = [ name for _, name, _ in pkgutil.iter_modules([pkgpath]) ]
packages.sort()
packages.pop(packages.index('Resources'))
packages.pop(packages.index('Workflow'))
return packages
def getPackageModules( package ):
diracPackage = __import__( 'DIRAC.%s' % package, globals(), locals(), [ '*' ] )
pkgpath = os.path.dirname( diracPackage.__file__ )
modules = [ name for _, name, _ in pkgutil.iter_modules([pkgpath]) ]
modules.sort()
return modules
def writeIndexHeader( indexFile, title, depth=2 ):
indexFile.write( '\n' + '=' * len( title ) )
indexFile.write( '\n%s\n' % title )
indexFile.write( '=' * len( title ) )
indexFile.write( '\n\n.. toctree::' )
indexFile.write( '\n :maxdepth: %d\n' % depth )
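# Illustrative sketch, not part of the original script: a hedged demo of the
# reST emitted by writeIndexHeader, written to an in-memory buffer so nothing
# touches the filesystem. The _demoIndexHeader name is an addition for
# illustration only.
def _demoIndexHeader():
  try:
    from StringIO import StringIO   # Python 2
  except ImportError:
    from io import StringIO         # Python 3
  buf = StringIO()
  writeIndexHeader( buf, 'Systems', 1 )
  # Produces (roughly) an underlined "Systems" title followed by a
  # ".. toctree::" directive with ":maxdepth: 1".
  return buf.getvalue()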
def writeCodeDocumentationIndexRST( codeDocumentationPath, diracPackages ):
'''
'''
indexPath = os.path.join( codeDocumentationPath, 'index.rst' )
with open( indexPath, 'w' ) as index:
index.write( '.. _code_documentation:\n\n')
index.write( 'Code Documentation (|release|)\n' )
index.write( '------------------------------\n' )
writeIndexHeader( index, 'Systems', 1 )
for diracPackage in diracPackages:
if "System" in diracPackage:
index.write( '\n %s/index.rst\n' % diracPackage )
writeIndexHeader( index, 'Other', 1 )
for diracPackage in ['Interfaces','Core']:
index.write( '\n %s/index.rst\n' % diracPackage )
def writePackageDocumentation( tmpDir, codeDocumentationPath, diracPackage ):
packageDir = os.path.join( codeDocumentationPath, diracPackage )
try:
os.mkdir( packageDir )
except OSError:
sys.exit( 'Cannot create %s' % packageDir )
modulePackages = getPackageModules( diracPackage )
indexPath = os.path.join( packageDir, 'index.rst' )
with open( indexPath, 'w' ) as index:
titlePackage = diracPackage
if diracPackage == "Core":
titlePackage = "Utilities"
elif diracPackage == "Interfaces":
titlePackage = "API"
writeIndexHeader( index, titlePackage )
for modulePackage in modulePackages:
if not modulePackage in DOCMODULES:
continue
index.write( '\n\n %s/index.rst' % modulePackage )
packageModPath = os.path.join( packageDir, modulePackage )
try:
os.mkdir( packageModPath )
except OSError:
sys.exit( 'Cannot create %s' % packageModPath )
packModPackages = getPackageModules( '%s.%s' % ( diracPackage, modulePackage ) )
packageModPathIndex = os.path.join( packageModPath, 'index.rst' )
with open( packageModPathIndex, 'w' ) as packModFile:
writeIndexHeader( packModFile, modulePackage )
for packModPackage in packModPackages:
if 'lfc_dfc_copy' in packModPackage or "lfc_dfc_db_copy" in packModPackage:
continue
if 'CLI' in packModPackage:
continue
route = 'DIRAC/%s/%s/%s.py' % ( diracPackage, modulePackage, packModPackage )
route2 = tmpDir + '/../../' + route
if not os.path.isfile( route2 ):
if not packModPackage in ['Helpers']:
continue
packModFile.write( '\n\n %s/index.rst' % packModPackage )
dir2 = 'DIRAC/%s/%s/%s' % ( diracPackage, modulePackage, packModPackage )
subModPackages = getPackageModules( '%s.%s.%s' % ( diracPackage, modulePackage, packModPackage ) )
subModPath = os.path.join( packageModPath, packModPackage )
subModPathIndex = os.path.join( subModPath, 'index.rst' )
os.mkdir( subModPath )
with open( subModPathIndex, 'w' ) as subModFile:
writeIndexHeader( subModFile, packModPackage )
for subModPackage in subModPackages:
subModFile.write( '\n\n %s' % subModPackage )
subModPackagePath = os.path.join( subModPath, '%s.rst' % subModPackage )
f = open( subModPackagePath, 'w' )
f.write( '=' * len( subModPackage ) )
f.write( '\n%s\n' % subModPackage )
f.write( '=' * len( subModPackage ) )
f.write( '\n' )
f.write( '\n.. automodule:: DIRAC.%s.%s.%s.%s' % ( diracPackage, modulePackage, packModPackage, subModPackage ) )
f.write( '\n :members:' )
f.close()
else:
packModFile.write( '\n\n %s' % packModPackage )
packModPackagePath = os.path.join( packageModPath, '%s.rst' % packModPackage )
f = open( packModPackagePath, 'w' )
f.write( '=' * len( packModPackage ) )
f.write( '\n%s\n' % packModPackage )
f.write( '=' * len( packModPackage ) )
f.write( '\n' )
f.write( '\n.. automodule:: DIRAC.%s.%s.%s' % ( diracPackage, modulePackage, packModPackage ) )
f.write( '\n :members:' )
f.close()
#...............................................................................
# run
def run( diracVersion, tmpDir = None ):
if tmpDir is None:
tmpDir = getTmpDir()
diracPackages = getDIRACPackages()
codeDocumentationPath = getCodeDocumentationPath()
writeCodeDocumentationIndexRST( codeDocumentationPath, diracPackages )
for diracPackage in diracPackages:
writePackageDocumentation( tmpDir, codeDocumentationPath, diracPackage )
#...............................................................................
# main
if __name__ == "__main__":
try:
tmpdir = sys.argv[ 1 ]
except IndexError:
tmpdir = None
try:
diracVersion = sys.argv[ 2 ]
except IndexError:
diracVersion = 'integration'
run( diracVersion, tmpdir )
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
docs/Tools/buildCodeDOC.py
|
Python
|
gpl-3.0
| 7,191
|
[
"DIRAC"
] |
40e5e0a587dc585c924e4bc6d880f32cbfd6e813650affbf7a6e6c2204243952
|
# -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]
Market Visualization Prototype
Quotes Module
"""
from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os
def getIntradayData(ticker, interval_seconds=61, num_days=10):
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds,num_days)
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
r = r.splitlines()
# Split each line by a comma, starting at the 8th line
r = [line.split(',') for line in r[7:]]
# Save data in Pandas DataFrame
df = pd.DataFrame(r, columns=['Datetime',ticker])
# Convert UNIX to Datetime format
df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
df.index = df['Datetime']
return df[ticker]
def getDailyData(ticker, startDate, endDate=date.today()):
''' Daily quotes from Google Finance API. Date format='yyyy-mm-dd' '''
ticker = ticker.upper()
urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
urlString += "&startdate={0}&enddate={1}&output=csv".format(
startDate.strftime('%b %d, %Y'),endDate.strftime('%b %d, %Y'))
#Convert URL output to dataframe
df = pd.read_csv(urllib.urlopen(urlString))
# Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))
#Index by date
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
return df
def getLastPrice(ticker):
    '''Returns the last price of a given ticker (from the Google Finance API)'''
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
obj = json.loads(r[3:])
print(obj)
price = float(obj[0]['l'])
return price
def buildDailyPriceData(tickerList, startDate, endDate):
print('Pulling Market Data for S&P 500 from {0} to {1}'.format(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
#Build SP500 daily price data (for saving)
firstTicker = tickerList[0]
print(firstTicker)
firstTickerData = getDailyData(firstTicker, startDate, endDate)
firstTickerData.rename(columns={'Close' : firstTicker}, inplace = True)
df = firstTickerData[firstTicker]
for ticker in tickerList[1:]:
print(ticker)
newTicker = getDailyData(ticker, startDate, endDate)
if not newTicker.empty:
newTicker.rename(columns={'Close' : ticker}, inplace = True)
df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')
#Google returns data w/ most recent at the top, this puts data in chrono order
stockPrices = df.sort_index()
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def buildDummyData():
'''Builds Daily Price Data from a backup .csv file
Used for offline testing purposes
'''
#Select Dates
startDate = datetime.strptime('20120101', '%Y%m%d')
endDate = datetime.strptime('20130101', '%Y%m%d')
#Load dataset from .csv
print("Pulling Market Data from .csv")
dataLoc = os.path.join(os.path.dirname(__file__),"Resources/SP500_daily_price_data.csv")
df = pd.read_csv(dataLoc)
#Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
#Build Price Table
stockPrices = df[startDate:endDate]
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def createIndexedPricing(stockPrices, startingIndexValue):
'''Takes a stock prices tables and converts to indexed pricing
(i.e. all prices are relative based on a common starting index value)
Inputs:
stockPrices => a panda DataFrame
startingIndexValue => the value that all prices will start at
'''
#Build Returns Table
stockReturns = stockPrices.pct_change(1)
#Build Indexed Price Table (indexed to 100)
indexedPrices = stockReturns + 1
indexedPrices.iloc[0] = startingIndexValue
indexedPrices = indexedPrices.cumprod(axis=0)
return indexedPrices
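# Illustrative sketch, not part of the original module: a hedged, offline demo
# of createIndexedPricing using a tiny hand-made price table, so no network
# access to the (now defunct) Google Finance endpoints is needed. The
# demoIndexedPricing name is an addition for illustration only.
def demoIndexedPricing():
    prices = pd.DataFrame({'AAA': [10.0, 11.0, 12.1],
                           'BBB': [20.0, 19.0, 20.9]})
    indexed = createIndexedPricing(prices, startingIndexValue=100.0)
    # Both columns start at 100 and then move with their own daily returns:
    #   AAA -> 100, 110, 121    BBB -> 100, 95, 104.5
    print(indexed)
    return indexed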
|
ryanpstauffer/market-vis
|
marketvis/quotes.py
|
Python
|
mit
| 5,030
|
[
"Mayavi"
] |
cb7af044733bc4cddb91d309248eb034c00cac6102a6e52af23b0b9dbe952aef
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from operator import itemgetter
from tempfile import NamedTemporaryFile
from crumbs.utils.optional_modules import Seq, AlignmentFile
from crumbs.utils.tags import (TRIMMING_RECOMMENDATIONS, QUALITY, OTHER,
VECTOR, TRIMMING_KINDS, SEQS_PASSED,
ORPHAN_SEQS)
from crumbs.seq.utils.seq_utils import get_uppercase_segments
from crumbs.seq.seq import (copy_seq, get_str_seq, get_annotations, get_length,
slice_seq, get_int_qualities, get_name)
from crumbs.utils.segments_utils import (get_longest_segment, get_all_segments,
get_longest_complementary_segment,
merge_overlaping_segments)
from crumbs.utils.tags import SEQRECORD
from crumbs.iterutils import rolling_window
from crumbs.blast import BlasterForFewSubjects
from crumbs.seq.seqio import write_seqs
from crumbs.seq.pairs import group_pairs_by_name, group_pairs
from crumbs.settings import get_setting
from crumbs.seq.mate_chimeras import (_split_mates, _get_primary_alignment,
_read_is_totally_mapped, _get_qstart,
_get_qend, _5end_mapped,
_group_alignments_reads_by_qname)
from crumbs.mapping import (alignedread_to_seqitem, map_with_bwamem,
map_process_to_sortedbam)
# pylint: disable=R0903
def seq_to_trim_packets(seq_packets, group_paired_reads=False):
'It yields packets suitable for the filters'
for packet in seq_packets:
if group_paired_reads:
packet = list(group_pairs_by_name(packet))
else:
packet = list(group_pairs(packet, n_seqs_in_pair=1))
yield {SEQS_PASSED: packet, ORPHAN_SEQS: []}
class _BaseTrim(object):
'Base Trim class'
def __call__(self, trim_packet):
'It trims the seqs'
self._pre_trim(trim_packet)
trimmed_seqs = []
for paired_seqs in trim_packet[SEQS_PASSED]:
trimmed_seqs.append([self._do_trim(s) for s in paired_seqs])
self._post_trim()
return {SEQS_PASSED: trimmed_seqs,
ORPHAN_SEQS: trim_packet[ORPHAN_SEQS]}
def _do_trim(self, seq):
raise NotImplementedError()
def _pre_trim(self, trim_packet):
pass
def _post_trim(self):
pass
class TrimLowercasedLetters(_BaseTrim):
'It trims the masked segments of the seqrecords.'
def _do_trim(self, seq):
str_seq = get_str_seq(seq)
unmasked_segments = get_uppercase_segments(str_seq)
segment = get_longest_segment(unmasked_segments)
if segment is not None:
segments = []
if segment[0] != 0:
segments.append((0, segment[0] - 1))
len_seq = len(str_seq)
if segment[1] != len_seq - 1:
segments.append((segment[1] + 1, len_seq - 1))
_add_trim_segments(segments, seq, kind=OTHER)
else:
segments = [(0, len(seq))]
_add_trim_segments(segments, seq, kind=OTHER)
return seq
def _add_trim_segments(segments, sequence, kind):
'It adds segments to the trimming recommendation in the annotation'
assert kind in TRIMMING_KINDS
if not segments:
return
annotations = sequence.object.annotations
if TRIMMING_RECOMMENDATIONS not in annotations:
annotations[TRIMMING_RECOMMENDATIONS] = {}
for trim_kind in TRIMMING_KINDS:
annotations[TRIMMING_RECOMMENDATIONS][trim_kind] = []
trim_rec = annotations[TRIMMING_RECOMMENDATIONS]
trim_rec[kind].extend(segments)
class TrimEdges(_BaseTrim):
    'It adds a trimming recommendation to remove a fixed number of bases from the edges of the seqs.'
def __init__(self, left=0, right=0):
        '''The initiator.
        left - number of bases to trim from the left side
        right - number of bases to trim from the right side
        '''
self.left = left
self.right = right
super(TrimEdges, self).__init__()
def _do_trim(self, seq):
'It trims the edges of the given seqs.'
left = self.left
right = self.right
segments = [(0, left - 1)] if left else []
if right:
seq_len = get_length(seq)
segments.append((seq_len - right, seq_len - 1))
_add_trim_segments(segments, seq, kind=OTHER)
return seq
def _mask_sequence(seq, segments):
'It masks the given segments of the sequence'
if not segments:
return seq
segments = merge_overlaping_segments(segments)
segments = get_all_segments(segments, get_length(seq))
str_seq = get_str_seq(seq)
new_seq = ''
for segment in segments:
start = segment[0][0]
end = segment[0][1] + 1
str_seq_ = str_seq[start:end]
if segment[1]:
str_seq_ = str_seq_.lower()
new_seq += str_seq_
if seq.kind == SEQRECORD:
new_seq = Seq(new_seq, alphabet=seq.object.seq.alphabet)
return copy_seq(seq, seq=new_seq)
class TrimOrMask(object):
'It trims and masks the Seq following the trimming recommendations.'
def __init__(self, mask=False):
'''The initiator.'''
self.mask = mask
def __call__(self, trim_packet):
'It trims the seqs'
trimmed_seqs = []
orphan_seqs = trim_packet[ORPHAN_SEQS]
for paired_seqs in trim_packet[SEQS_PASSED]:
trimmed_paired_seqs = [self._do_trim(s) for s in paired_seqs]
            # all sequences survived trimming, none was lost
if None not in trimmed_paired_seqs:
trimmed_seqs.append(trimmed_paired_seqs)
            # all sequences are lost because of trimming
elif (len(trimmed_paired_seqs) == 1 or
trimmed_paired_seqs == (None, None)):
continue
# one of the pairs is lost in trimming
else:
orphans = [s for s in trimmed_paired_seqs if s is not None]
orphan_seqs.extend(orphans)
orphan_seqs = self._trim_orphans(orphan_seqs)
return {SEQS_PASSED: trimmed_seqs, ORPHAN_SEQS: orphan_seqs}
def _do_trim(self, seq):
'It trims the edges of the given seqs.'
annots = get_annotations(seq)
if not TRIMMING_RECOMMENDATIONS in annots:
return seq
trim_rec = annots[TRIMMING_RECOMMENDATIONS]
# fixing the trimming recommendations
if TRIMMING_RECOMMENDATIONS in annots:
del annots[TRIMMING_RECOMMENDATIONS]
trim_segments = []
for trim_kind in TRIMMING_KINDS:
trim_segments.extend(trim_rec.get(trim_kind, []))
# masking
if self.mask:
seq = _mask_sequence(seq, trim_segments)
else:
# trimming
if trim_segments:
trim_limits = get_longest_complementary_segment(
trim_segments, get_length(seq))
if trim_limits is None:
# there's no sequence left
return None
else:
trim_limits = []
if trim_limits:
seq = slice_seq(seq, trim_limits[0], trim_limits[1] + 1)
return seq
def _trim_orphans(self, seqs):
new_seqs = []
for seq in seqs:
seq = self._do_trim(seq)
if seq is not None:
new_seqs.append(seq)
return new_seqs
def _get_bad_quality_segments(quals, window, threshold, trim_left=True,
trim_right=True):
    '''It returns the regions with quality below the threshold (the segments to trim).
The algorithm is similar to the one used by qclip in Staden.
'''
# do window quality means
mean = lambda l: float(sum(l)) / len(l) if len(l) > 0 else float('nan')
wquals = [mean(win_quals) for win_quals in rolling_window(quals, window)]
if not wquals:
return [(0, len(quals) - 1)]
index_max, max_val = max(enumerate(wquals), key=itemgetter(1))
if max_val < threshold:
return [(0, len(quals) - 1)]
if trim_left:
wleft_index = 0
for wleft_index in range(index_max - 1, -1, -1):
if wquals[wleft_index] < threshold:
wleft_index += 1
break
else:
wleft_index = 0
if trim_right:
wright_index = index_max
for wright_index in range(index_max, len(wquals)):
if wquals[wright_index] < threshold:
wright_index -= 1
break
else:
wright_index = len(wquals) - 1
left = wleft_index
right = wright_index + window - 1
segments = []
if left:
segments.append((0, left - 1))
if right < len(quals) - 1:
segments.append((right + 1, len(quals) - 1))
if not segments:
return None
return segments
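# Illustrative sketch, not part of ngs_crumbs: a hedged demo of the windowed
# quality clipping above on a toy quality list. The _demo_ name is an
# addition for illustration only.
def _demo_bad_quality_segments():
    quals = [10, 10, 10, 35, 35, 35, 35, 10, 10, 10]
    segments = _get_bad_quality_segments(quals, window=3, threshold=30)
    # expected: [(0, 2), (7, 9)] -- both low-quality ends are flagged for
    # trimming while the central high-quality run (positions 3-6) is kept
    return segments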
class TrimByQuality(_BaseTrim):
'It trims the low quality regions of the SeqRecords.'
def __init__(self, window, threshold, trim_left=True, trim_right=True):
'The initiator'
self.window = int(window)
self.threshold = threshold
self.trim_left = trim_left
self.trim_right = trim_right
super(TrimByQuality, self).__init__()
def _do_trim(self, seq):
'It trims the masked segments of the seqrecords.'
window = self.window
threshold = self.threshold
trim_left = self.trim_left
trim_right = self.trim_right
        try:
            quals = list(get_int_qualities(seq))
        except KeyError:
            msg = 'Some of the input sequences do not have qualities: {}'
            msg = msg.format(get_name(seq))
            # without qualities the trim cannot be computed; re-raise with a
            # clear message (the exact exception type is an assumption here)
            raise ValueError(msg)
segments = _get_bad_quality_segments(quals, window, threshold,
trim_left, trim_right)
if segments is not None:
_add_trim_segments(segments, seq, kind=QUALITY)
return seq
class TrimWithBlastShort(_BaseTrim):
'It trims adaptors with the blast short algorithm'
def __init__(self, oligos):
'The initiator'
self.oligos = oligos
super(TrimWithBlastShort, self).__init__()
def _pre_trim(self, trim_packet):
        seqs = [s for seqs in trim_packet[SEQS_PASSED] for s in seqs]
db_fhand = write_seqs(seqs, file_format='fasta')
db_fhand.flush()
params = {'task': 'blastn-short', 'expect': '0.0001'}
filters = [{'kind': 'score_threshold', 'score_key': 'identity',
'min_score': 87},
{'kind': 'min_length', 'min_num_residues': 13,
'length_in_query': False}]
self._matcher = BlasterForFewSubjects(db_fhand.name, self.oligos,
program='blastn', filters=filters,
params=params,
elongate_for_global=True)
def _do_trim(self, seq):
        'It adds trimming recommendations for the oligo regions matched by blast.'
segments = self._matcher.get_matched_segments_for_read(get_name(seq))
if segments is not None:
_add_trim_segments(segments[0], seq, kind=VECTOR)
return seq
def _get_longest_5end_aligned_read(aligned_reads, max_clipping):
longest_5end = None
length = 0
for aligned_read in aligned_reads:
if (_5end_mapped(aligned_read, max_clipping)
and aligned_read.alen > length):
longest_5end = aligned_read
length = aligned_read.alen
return longest_5end
class TrimMatePairChimeras(_BaseTrim):
'It trims chimeric regions in mate pairs reads'
def __init__(self, index_fpath, max_clipping=None, tempdir=None):
'The initiator'
self._tempdir = tempdir
self._index_fpath = index_fpath
if max_clipping is not None:
self.max_clipping = max_clipping
else:
self.max_clipping = get_setting('CHIMERAS_SETTINGS')['MAX_CLIPPING']
def _pre_trim(self, trim_packet):
        seqs = [s for seqs in trim_packet[SEQS_PASSED] for s in seqs]
reads_fhand = NamedTemporaryFile(dir=self._tempdir, suffix='.trimming')
write_seqs(seqs, reads_fhand)
reads_fhand.flush()
bwa = map_with_bwamem(self._index_fpath,
interleave_fpath=reads_fhand.name)
bam_fhand = NamedTemporaryFile(dir=self._tempdir)
map_process_to_sortedbam(bwa, bam_fhand.name, key='queryname',
tempdir=self._tempdir)
self._bam_fhand = bam_fhand
reads_fhand.close()
def _do_trim(self, aligned_reads):
max_clipping = self.max_clipping
primary_alignment = _get_primary_alignment(aligned_reads)
        _5end = _get_longest_5end_aligned_read(aligned_reads, max_clipping)
seq = alignedread_to_seqitem(primary_alignment)
segments = None
if _5end is not None:
if not _read_is_totally_mapped([_5end], max_clipping):
if not _5end.is_reverse:
qend = _get_qend(_5end)
else:
qend = get_length(seq) - _get_qstart(_5end)
segments = [(qend, get_length(seq) - 1)]
if segments is not None:
_add_trim_segments(segments, seq, kind=OTHER)
return seq
def __call__(self, trim_packet):
'It trims the seqs'
self._pre_trim(trim_packet)
trimmed_seqs = []
bamfile = AlignmentFile(self._bam_fhand.name)
for grouped_mates in _group_alignments_reads_by_qname(bamfile):
for aligned_reads in _split_mates(grouped_mates):
trimmed_seqs.append([self._do_trim(aligned_reads)])
self._post_trim()
return {SEQS_PASSED: trimmed_seqs,
ORPHAN_SEQS: trim_packet[ORPHAN_SEQS]}
def _post_trim(self):
self._bam_fhand.close()
class TrimNexteraAdapters(_BaseTrim):
"It trims from Nextera adaptors found with blast short algorithm to 3'end"
"If adapter is at one end and it is not complete, it trims more bases"
def __init__(self, oligos):
'The initiator'
self.oligos = oligos
super(TrimNexteraAdapters, self).__init__()
def _pre_trim(self, trim_packet):
        seqs = [s for seqs in trim_packet[SEQS_PASSED] for s in seqs]
db_fhand = write_seqs(seqs, file_format='fasta')
db_fhand.flush()
params = {'task': 'blastn-short', 'expect': '0.0001'}
filters = [{'kind': 'score_threshold', 'score_key': 'identity',
'min_score': 87},
{'kind': 'min_length', 'min_num_residues': 13,
'length_in_query': False}]
self._matcher = BlasterForFewSubjects(db_fhand.name, self.oligos,
program='blastn', filters=filters,
params=params,
elongate_for_global=True)
def _do_trim(self, seq):
        "It adds trimming recommendations from the adaptor matches to the 3' end."
segments = self._matcher.get_matched_segments_for_read(get_name(seq))
if segments is not None:
segments = [(segment[0], get_length(seq) - 1) for segment in segments[0]]
_add_trim_segments(segments, seq, kind=OTHER)
return seq
CUTADAPT = 'cutadapt'
#cutadapt bin should be included somewhere else
_5END = '5end'
_3END = '3end'
ANYWHERE = 'anywhere'
def trim_with_cutadapt(in_fpath, out_fpath, oligos, error_rate=None,
summary_fpath=None):
    #TODO: include cutadapt in the code or remove this function
    # This functionality is not thoroughly tested
options = {_3END: '-a', ANYWHERE: '-b', _5END: '-g'}
cmd = [CUTADAPT, in_fpath, '-o', out_fpath]
for kind, oligo_seqs in oligos.items():
for oligo_seq in oligo_seqs:
cmd.extend([options[kind], oligo_seq])
if error_rate is not None:
cmd.append(str(error_rate))
if summary_fpath is None:
summary_fhand = sys.stdout
else:
summary_fhand = open(summary_fpath, 'w')
cutadapt = subprocess.Popen(cmd, stdout=summary_fhand)
cutadapt.wait()
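# Illustrative sketch, not part of ngs_crumbs: the oligos mapping expected by
# trim_with_cutadapt, keyed by the adapter-location constants defined above.
# The adapter sequence and file names are placeholders, and actually running
# the command requires the external cutadapt binary to be installed.
def _demo_trim_with_cutadapt():
    oligos = {_5END: [],
              _3END: ['AGATCGGAAGAGC'],  # e.g. a 3' adapter sequence
              ANYWHERE: []}
    trim_with_cutadapt('reads.fastq', 'trimmed.fastq', oligos,
                       summary_fpath='cutadapt_summary.txt')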
|
JoseBlanca/ngs_crumbs
|
crumbs/seq/trim.py
|
Python
|
gpl-3.0
| 17,144
|
[
"BLAST",
"BWA"
] |
c1ae050bc1ac2dd52b5594f407a16f583d6c706a72d0f4eca4d4539e06be8c9f
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Common functions for auxiliary reading --- :mod:`MDAnalysis.auxiliary.core`
===========================================================================
.. autofunction:: get_auxreader_for
.. autofunction:: auxreader
"""
from __future__ import absolute_import
from six import string_types
from . import _AUXREADERS
from ..lib import util
def get_auxreader_for(auxdata=None, format=None):
"""Return the appropriate auxiliary reader class for *auxdata*/*format*.
If *format* is provided, will attempt to find an AuxReader corresponding
to that format. If *auxdata* is provided, the format will first be guessed.
Parameters
----------
auxdata
(Optional) The auxiliary data (e.g. filename of file containing
auxiliary data).
format
(Optional). Known format of *auxdata*.
Returns
-------
:class:`~MDAnalysis.auxiliary.base.AuxReader`
AuxReader class corresponding to the supplied/guessed format.
Raises
------
ValueError
If an AuxReader for the format (provided or guessed from *auxdata*)
cannot be found.
"""
if not auxdata and not format:
raise ValueError('Must provide either auxdata or format')
if format is None:
if isinstance(auxdata, string_types):
## assume it's a filename?
format = util.guess_format(auxdata)
else:
## TBA if add non-file-format readers
pass
format = format.upper()
try:
return _AUXREADERS[format]
except KeyError:
raise ValueError("Unknown auxiliary data format for auxdata: "
"{0}".format(auxdata))
else:
try:
return _AUXREADERS[format]
except KeyError:
raise ValueError("Unknown auxiliary data format {0}".format(format))
def auxreader(auxdata, format=None, **kwargs):
""" Return an auxiliary reader instance for *auxdata*.
An appropriate reader class is first obtained using
:func:`get_auxreader_for`, and an auxiliary reader instance for *auxdata*
then created and returned.
Parameters
----------
auxdata
Auxiliary data (e.g. filename of file containing auxiliary data).
format
(Optional). The format of *auxdata*, if known.
**kwargs
Additional AuxReader options.
Returns
-------
:class:`~MDAnalysis.auxiliary.base.AuxReader` instance
Appropriate auxiliary reader instance for *auxdata*.
"""
reader = get_auxreader_for(auxdata, format=format)
return reader(auxdata, **kwargs)
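# Illustrative sketch, not part of MDAnalysis: a hedged example of the two
# helpers above. The class lookup needs no file access; the reader
# instantiation uses 'pull_force.xvg' as a placeholder name for any GROMACS
# .xvg file you may have.
def _demo_auxreader():
    xvg_class = get_auxreader_for(format='XVG')   # class lookup only
    aux = auxreader('pull_force.xvg')             # opens the placeholder file
    return xvg_class, aux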
|
kain88-de/mdanalysis
|
package/MDAnalysis/auxiliary/core.py
|
Python
|
gpl-2.0
| 3,638
|
[
"MDAnalysis"
] |
a7bfa52c3984277e3f3c67290cf6b2592414e44a099356b8ec202d40be437e45
|
#!/usr/bin/env python
"""CSP Commstime benchmark.
See F.R.M. Barnes (2006) Compiling CSP. In Proceedings of
Communicating Process Architectures 2006.
Code adapted from PyCSP by John Markus Bjorndalen, available:
http://www.cs.uit.no/~johnm/code/PyCSP/
PyCSP - Communicating Sequential Processes for Python. John Markus
Bjorndalen, Brian Vinter, Otto Anshus. CPA 2007, Surrey, UK, July
8-11, 2007. IOS Press 2007, ISBN 978-1-58603-767-3, Concurrent
Systems Engineering Series (ISSN 1383-7575).
"""
from csp.csp import *
from csp.builtins import Prefix, Delta2, Succ
import os
import time
@process
def Consumer(cin):
"""Commstime consumer process
readset = cin
writeset =
"""
N = 5000
ts = time.time
t1 = ts()
cin.read()
t1 = ts()
for i in range(N):
cin.read()
t2 = ts()
dt = t2-t1
tchan = dt / (4 * N)
print("DT = {0}.\nTime per ch : {1}/(4*{2}) = {3} s = {4} us".format(dt, dt, N, tchan, tchan * 1000000))
print("consumer done, posioning channel")
cin.poison()
def CommsTimeBM():
print('Creating channels now...')
# Create channels
a = Channel()
b = Channel()
c = Channel()
d = Channel()
print("Running commstime test")
Par(Prefix(c, a, prefix_item = 0), # Initiator
Delta2(a, b, d), # Forwarding to two
Succ(b, c), # Feeding back to prefix
Consumer(d)).start() # Timing process
print('Finished run...')
if __name__ == '__main__':
N_BM = 10
for i in range(N_BM):
print("----------- run {0}/{1} -------------".format(i+1, N_BM))
CommsTimeBM()
print("------- Commstime finished ---------")
|
bjlittle/python-csp
|
benchmark/commstime/commstime.py
|
Python
|
gpl-2.0
| 1,672
|
[
"Brian"
] |
6ccacc3e155d4c4c7b9e1ff1b034f5f026dc1d60a58cdd702147a400a5918817
|
import numpy as np
from scipy import signal
def find_sideband(ft_data, which=+1, copy=True):
"""Find the side band position of a hologram
The hologram is Fourier-transformed and the side band
is determined by finding the maximum amplitude in
Fourier space.
Parameters
----------
ft_data: 2d ndarray
        FFT-shifted Fourier transform of the hologram image
which: +1 or -1
which sideband to search for:
- +1: upper half
- -1: lower half
copy: bool
copy `ft_data` before modification
Returns
-------
fsx, fsy : tuple of floats
coordinates of the side band in Fourier space frequencies
"""
if copy:
ft_data = ft_data.copy()
if which not in [+1, -1]:
raise ValueError("`which` must be +1 or -1!")
ox, oy = ft_data.shape
cx = ox // 2
cy = oy // 2
minlo = max(int(np.ceil(ox / 42)), 5)
if which == +1:
# remove lower part
ft_data[cx - minlo:] = 0
else:
ft_data[:cx + minlo] = 0
# remove values around axes
ft_data[cx - 3:cx + 3, :] = 0
ft_data[:, cy - 3:cy + 3] = 0
# find maximum
am = np.argmax(np.abs(ft_data))
iy = am % oy
ix = int((am - iy) / oy)
fx = np.fft.fftshift(np.fft.fftfreq(ft_data.shape[0]))[ix]
fy = np.fft.fftshift(np.fft.fftfreq(ft_data.shape[1]))[iy]
return fx, fy
def fourier2dpad(data, zero_pad=True):
"""Compute the FFT-shifted 2D Fourier transform with zero padding
Parameters
----------
    data: 2d float ndarray
real-valued image data
zero_pad: bool
perform zero-padding to next order of 2
"""
if zero_pad:
# zero padding size is next order of 2
(N, M) = data.shape
order = int(max(64., 2**np.ceil(np.log(2 * max(N, M)) / np.log(2))))
# this is faster than np.pad
datapad = np.zeros((order, order), dtype=float)
datapad[:data.shape[0], :data.shape[1]] = data
else:
datapad = data
# Fourier transform
fft = np.fft.fftshift(np.fft.fft2(datapad))
return fft
def get_field(hologram, sideband=+1, filter_name="disk", filter_size=1/3,
filter_size_interpretation="sideband distance",
subtract_mean=True, zero_pad=True, copy=True, ret_mask=False):
"""Compute the complex field from a hologram using Fourier analysis
Parameters
----------
hologram: real-valued 2d ndarray
hologram data (if this is a 3d array, then the first slice
defined by the first two axes is used)
sideband: +1, -1, or tuple of (float, float)
specifies the location of the sideband:
- +1: sideband in the upper half in Fourier space,
exact location is found automatically
- -1: sideband in the lower half in Fourier space,
exact location is found automatically
- (float, float): sideband coordinates in
frequencies in interval [1/"axes size", .5]
filter_name: str
specifies the filter to use, one of
- "disk": binary disk with radius `filter_size`
- "smooth disk": disk with radius `filter_size` convolved
with a radial gaussian (`sigma=filter_size/5`)
- "gauss": radial gaussian (`sigma=0.6*filter_size`)
- "square": binary square with side length `filter_size`
- "smooth square": square with side length `filter_size`
convolved with square gaussian (`sigma=filter_size/5`)
- "tukey": a square tukey window of width `2*filter_size` and
`alpha=0.1`
filter_size: float
Size of the filter in Fourier space. The interpretation
of this value depends on `filter_size_interpretation`.
See `filter_name` for how it is used in filtering.
filter_size_interpretation: str
If set to "sideband distance", the filter size is interpreted
as the relative distance between central band and sideband
(this is the default). If set to "frequency index", the filter
size is interpreted as a Fourier frequency index ("pixel size")
and must be between 0 and `max(hologram.shape)/2`.
subtract_mean: bool
If True, remove the mean of the hologram before performing
the Fourier transform. This setting is recommended as it
can reduce artifacts from frequencies around the central
band.
zero_pad: bool
Perform zero-padding before applying the FFT. Setting
`zero_pad` to `False` increases speed but might
introduce image distortions such as tilts in the phase
and amplitude data or dark borders in the amplitude data.
copy: bool
If set to True, input `data` is not edited.
ret_mask: bool
If set to True, return the filter mask used.
Notes
-----
If the input image has three axes, then only the first image
(defined by the first two axes) is taken (ignore alpha or
RGB channels).
The input image is zero-padded as a square image to the next
order of :math:`2^n`.
Even though the size of the "gauss" filter approximately matches
the frequencies of the "disk" filter, it takes into account
higher frequencies as well and thus suppresses ringing artifacts
for data that contain jumps in the phase image.
"""
if len(hologram.shape) == 3:
# take the first slice (we have alpha or RGB information)
hologram = hologram[:, :, 0]
if copy:
hologram = hologram.astype(dtype=float, copy=True)
if subtract_mean:
# remove contributions of the central band
# (this affects more than one pixel in the FFT
# because of zero-padding)
if issubclass(hologram.dtype.type, np.integer):
hologram = hologram.astype(float)
hologram -= hologram.mean()
# Fourier transform
fft = fourier2dpad(hologram, zero_pad=zero_pad)
if sideband in [+1, -1]:
fsx, fsy = find_sideband(fft, which=sideband)
else:
fsx, fsy = sideband
# roll fft such that sideband is located at the image center
shifted = np.roll(np.roll(fft, -int(fsx * fft.shape[0]), axis=0),
-int(fsy * fft.shape[1]), axis=1)
# coordinates in Fourier space
assert fft.shape[0] == fft.shape[1] # square-shaped Fourier domain
fx = np.fft.fftshift(np.fft.fftfreq(fft.shape[0])).reshape(-1, 1)
fy = fx.reshape(1, -1)
if filter_size_interpretation == "sideband distance":
# filter size based on distance b/w central band and sideband
if filter_size <= 0 or filter_size >= 1:
raise ValueError("For sideband distance interpretation, "
"`filter_size` must be between 0 and 1; "
f"got '{filter_size}'!")
fsize = np.sqrt(fsx**2 + fsy**2) * filter_size
elif filter_size_interpretation == "frequency index":
# filter size given in Fourier index (number of Fourier pixels)
if filter_size <= 0 or filter_size >= max(hologram.shape)/2:
raise ValueError("For frequency index interpretation, "
"`filter_size` must be between 0 and "
f"max(hologram.shape)/2; got '{filter_size}'!")
# convert to frequencies (compatible with fx and fy)
fsize = filter_size / fft.shape[0]
else:
raise ValueError("Invalid value for `filter_size_interpretation`: "
+ f"'{filter_size_interpretation}'")
if filter_name == "disk":
afilter = (fx**2 + fy**2) <= fsize**2
elif filter_name == "smooth disk":
sigma = fsize / 5
tau = 2 * sigma**2
radsq = fx**2 + fy**2
disk = radsq <= fsize**2
gauss = np.exp(-radsq / tau)
afilter = signal.convolve(gauss, disk, mode="same")
afilter /= afilter.max()
elif filter_name == "gauss":
sigma = fsize * .6
tau = 2 * sigma**2
afilter = np.exp(-(fx**2 + fy**2) / tau)
afilter /= afilter.max()
elif filter_name == "square":
afilter = (np.abs(fx) <= fsize) * (np.abs(fy) <= fsize)
elif filter_name == "smooth square":
blur = fsize / 5
tau = 2 * blur**2
square = (np.abs(fx) < fsize) * (np.abs(fy) < fsize)
gauss = np.exp(-(fx**2) / tau) * np.exp(-(fy**2) / tau)
afilter = signal.convolve(square, gauss, mode="same")
afilter /= afilter.max()
elif filter_name == "tukey":
alpha = 0.1
rsize = int(min(fx.size, fy.size)*fsize) * 2
tukey_window_x = signal.tukey(rsize, alpha=alpha).reshape(-1, 1)
tukey_window_y = signal.tukey(rsize, alpha=alpha).reshape(1, -1)
tukey = tukey_window_x * tukey_window_y
afilter = np.zeros(shifted.shape)
s1 = (np.array(shifted.shape) - rsize)//2
s2 = (np.array(shifted.shape) + rsize)//2
afilter[s1[0]:s2[0], s1[1]:s2[1]] = tukey
else:
raise ValueError("Unknown filter: {}".format(filter_name))
# apply filter
fft_filt = afilter * shifted
# inverse Fourier transform
field = np.fft.ifft2(np.fft.ifftshift(fft_filt))
# crop to original image shape
cropped = field[:hologram.shape[0], :hologram.shape[1]]
if ret_mask:
# shift mask center to sideband location
shifted_mask = np.roll(
np.roll(afilter, int(fsx * fft.shape[0]), axis=0),
int(fsy * fft.shape[1]), axis=1)
return cropped, shifted_mask
else:
return cropped
|
RI-imaging/qpimage
|
qpimage/holo.py
|
Python
|
mit
| 9,555
|
[
"Gaussian"
] |
350938947c1cc4e4f5707c1c3962459462305ca784510b97e792b9232d790f21
|
"""
Acceptance tests for Studio.
"""
from unittest import expectedFailure
from bok_choy.web_app_test import WebAppTest
from ..pages.studio.asset_index import AssetIndexPage
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.checklists import ChecklistsPage
from ..pages.studio.course_import import ImportPage
from ..pages.studio.course_info import CourseUpdatesPage
from ..pages.studio.edit_tabs import StaticPagesPage
from ..pages.studio.export import ExportPage
from ..pages.studio.howitworks import HowitworksPage
from ..pages.studio.index import DashboardPage
from ..pages.studio.login import LoginPage
from ..pages.studio.manage_users import CourseTeamPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.studio.settings import SettingsPage
from ..pages.studio.settings_advanced import AdvancedSettingsPage
from ..pages.studio.settings_graders import GradingPage
from ..pages.studio.signup import SignupPage
from ..pages.studio.textbooks import TextbooksPage
from ..pages.xblock.acid import AcidView
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from .helpers import UniqueCourseTest
class LoggedOutTest(WebAppTest):
"""
Smoke test for pages in Studio that are visible when logged out.
"""
def setUp(self):
super(LoggedOutTest, self).setUp()
self.pages = [LoginPage(self.browser), HowitworksPage(self.browser), SignupPage(self.browser)]
def test_page_existence(self):
"""
Make sure that all the pages are accessible.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
for page in self.pages:
page.visit()
class LoggedInPagesTest(WebAppTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and do not have a course yet.
"""
def setUp(self):
super(LoggedInPagesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_dashboard_no_courses(self):
"""
Make sure that you can get to the dashboard page without a course.
"""
self.auth_page.visit()
self.dashboard_page.visit()
class CoursePagesTest(UniqueCourseTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and have a course.
"""
COURSE_ID_SEPARATOR = "."
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CoursePagesTest, self).setUp()
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.pages = [
clz(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'])
for clz in [
AssetIndexPage, ChecklistsPage, ImportPage, CourseUpdatesPage,
StaticPagesPage, ExportPage, CourseTeamPage, CourseOutlinePage, SettingsPage,
AdvancedSettingsPage, GradingPage, TextbooksPage
]
]
def test_page_existence(self):
"""
Make sure that all these pages are accessible once you have a course.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
# Log in
self.auth_page.visit()
# Verify that each page is available
for page in self.pages:
page.visit()
class XBlockAcidBase(WebAppTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
# Define a unique course identifier
self.course_info = {
'org': 'test_org',
'number': 'course_' + self.unique_id[:5],
'run': 'test_' + self.unique_id,
'display_name': 'Test Course ' + self.unique_id
}
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
self.setup_fixtures()
self.auth_page.visit()
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.toggle_expand().unit('Test Unit').go_to()
container = unit.components[0].go_to_container()
acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.child_tests_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
# This will fail until we support editing on the container page
@expectedFailure
def test_acid_block_editor(self):
"""
Verify that all expected acid block tests pass in the studio editor
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.toggle_expand().unit('Test Unit').go_to()
unit.edit_draft()
acid_block = AcidView(self.browser, unit.components[0].edit().editor_selector)
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.child_tests_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('content'))
self.assertTrue(acid_block.scope_passed('settings'))
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
# This will fail until we fix support of children in pure XBlocks
@expectedFailure
def test_acid_block_preview(self):
super(XBlockAcidChildTest, self).test_acid_block_preview()
# This will fail until we fix support of children in pure XBlocks
@expectedFailure
def test_acid_block_editor(self):
super(XBlockAcidChildTest, self).test_acid_block_editor()
|
XiaodunServerGroup/xiaodun-platform
|
common/test/acceptance/tests/test_studio.py
|
Python
|
agpl-3.0
| 8,703
|
[
"VisIt"
] |
7bdaf5a909fc59ca48355c993b6cc4babbc81a84679f35f3f88ff3b9fa4451f1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an XRD pattern calculator.
"""
import os
import json
from math import sin, cos, asin, pi, degrees, radians
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .core import DiffractionPattern, AbstractDiffractionPatternCalculator, \
get_unique_families
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
# XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(AbstractDiffractionPatternCalculator):
r"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent facets. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\frac{2}{\lambda}`.
2. For each reciprocal point :math:`\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\sin(\theta) = \frac{\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \times s^2 \times \sum\limits_{i=1}^n a_i
\exp(-b_i s^2)
where :math:`s = \frac{\sin(\theta)}{\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \sum\limits_{j=1}^N f_j \exp(2\pi i \mathbf{g_{hkl}}
\cdot \mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl} F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\theta) = \frac{1 + \cos^2(2\theta)}
{\sin^2(\theta)\cos(\theta)}
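Example (a minimal sketch; the "POSCAR" file name is hypothetical and
any pymatgen Structure object works)::
    from pymatgen.core.structure import Structure
    structure = Structure.from_file("POSCAR")
    xrd = XRDCalculator(wavelength="CuKa")
    pattern = xrd.get_pattern(structure, two_theta_range=(10, 80))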
"""
# Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e., Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the diffraction pattern for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRDPattern)
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind, _ in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
# Force miller indices to be integers.
hkl = [int(round(i)) for i in hkl]
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
# Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
# Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
# Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
# Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
AbstractDiffractionPatternCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
x = []
y = []
hkls = []
d_hkls = []
for k in sorted(peaks.keys()):
v = peaks[k]
fam = get_unique_families(v[1])
if v[0] / max_intensity * 100 > AbstractDiffractionPatternCalculator.SCALED_INTENSITY_TOL:
x.append(k)
y.append(v[0])
hkls.append([{"hkl": hkl, "multiplicity": mult}
for hkl, mult in fam.items()])
d_hkls.append(v[2])
xrd = DiffractionPattern(x, y, hkls, d_hkls)
if scaled:
xrd.normalize(mode="max", value=100)
return xrd
|
gVallverdu/pymatgen
|
pymatgen/analysis/diffraction/xrd.py
|
Python
|
mit
| 11,340
|
[
"CRYSTAL",
"pymatgen"
] |
4fcd69d4d3b61433154a0dba7df3655a90809d0abc04c1acef800f1865efd72a
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from scipy.linalg import pascal, invpascal, toeplitz
from scipy.special import legendre
from scipy.interpolate import PPoly, UnivariateSpline
try:
from functools import cache
except ImportError: # no functools in Python 2
def cache(func):
res = {}
def decorated(*args):
if args not in res:
res[args] = func(*args)
return res[args]
return decorated
__doc__ = """
See :ref:`Polynomials` for details and examples.
.. toctree::
:hidden:
tools/polynomial
"""
class BasePolynomial(object):
"""
Abstract base class for polynomials. Implements multiplication and division
by numbers. (Addition and subtraction of polynomials are not implemented
because they are meaningful only for polynomials generated on the same
grid. Use ``Piecewise...`` classes for sums of polynomials.)
Attributes
----------
func : numpy array
values of the original function
abel : numpy array
values of the Abel transform
"""
def copy(self):
"""
Return an independent copy.
"""
other = self.__new__(type(self)) # create empty object (same type)
other.func = self.func.copy()
other.abel = self.abel.copy()
return other
def __imul__(self, num):
"""
In-place multiplication: Polynomial *= num.
"""
self.func *= num
self.abel *= num
return self
def __mul__(self, num):
"""
Multiplication: Polynomial * num.
"""
res = self.copy()
return res.__imul__(num)
__rmul__ = __mul__
__rmul__.__doc__ = \
"""
Multiplication: num * Polynomial.
"""
def __itruediv__(self, num):
"""
In-place division: Polynomial /= num.
"""
return self.__imul__(1 / num)
def __truediv__(self, num):
"""
Division: Polynomial / num.
"""
return self.__mul__(1 / num)
class Polynomial(BasePolynomial):
r"""
Polynomial function and its Abel transform.
Parameters
----------
r : numpy array
*r* values at which the function is generated
(and *x* values for its Abel transform);
must be non-negative and in ascending order
r_min, r_max : float
*r* domain:
the function is defined as the polynomial on [**r_min**, **r_max**]
and zero outside it;
0 ≤ **r_min** < **r_max** ≲ **max r**
(**r_max** might exceed maximal **r**, but usually by < 1 pixel;
negative **r_min** or **r_max** are allowed for convenience but are
interpreted as 0)
c: numpy array
polynomial coefficients in order of increasing degree:
[c₀, c₁, c₂] means c₀ + c₁ *r* + c₂ *r*\ ²
r_0 : float, optional
origin shift: the polynomial is defined as
c₀ + c₁ (*r* − **r_0**) + c₂ (*r* − **r_0**)² + ...
s : float, optional
*r* stretching factor (around **r_0**): the polynomial is defined as
c₀ + c₁ ((*r* − **r_0**)/**s**) + c₂ ((*r* − **r_0**)/**s**)² + ...
reduced : boolean, optional
internally rescale the *r* range to [0, 1];
useful to avoid floating-point overflows for high degrees
at large r (and might improve numeric accuracy)
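Example (a minimal sketch on an assumed pixel grid)::
    r = np.arange(100, dtype=float)
    p = Polynomial(r, 10, 80, [1.0, -0.01])  # 1 - 0.01 r on [10, 80]
    p.func   # function values sampled on r
    p.abel   # its forward Abel transform sampled on r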
"""
def __init__(self, r, r_min, r_max, c, r_0=0.0, s=1.0, reduced=False):
n = r.shape[0]
# trim negative r limits
if r_max <= 0:
# both func and abel must be zero everywhere
self.func = np.zeros(n)
self.abel = np.zeros(n)
return
if r_min < 0:
r_min = 0
# remove zero high-order terms
c = np.array(np.trim_zeros(c, 'b'), float)
# if all coefficients are zero
if len(c) == 0:
# then both func and abel are also zero everywhere
self.func = np.zeros(n)
self.abel = np.zeros(n)
return
# polynomial degree
K = len(c) - 1
if reduced:
# rescale r to [0, 1] (to avoid FP overflow)
r = r / r_max
r_0 /= r_max
s /= r_max
abel_scale = r_max
r_min /= r_max
r_max = 1.0
if s != 1.0:
# apply stretch
S = np.cumprod([1.0] + [1.0 / s] * K) # powers of 1/s
c *= S
if r_0 != 0.0:
# apply shift
P = pascal(1 + K, 'upper', False) # binomial coefficients
rk = np.cumprod([1.0] + [-float(r_0)] * K) # powers of -r_0
T = toeplitz([1.0] + [0.0] * K, rk) # upper-diag. (-r_0)^{l - k}
c = (P * T).dot(c)
# whether even and odd powers are present
even = np.any(c[::2])
odd = np.any(c[1::2])
# index limits
i_min = np.searchsorted(r, r_min)
i_max = np.searchsorted(r, r_max)
# Calculate all necessary variables within [0, r_max]
# x, x^2
x = r[:i_max]
x2 = x * x
# integration limits y = sqrt(r^2 - x^2) or 0
def sqrt0(x): return np.sqrt(x, np.zeros_like(x), where=x > 0)
y_up = sqrt0(r_max * r_max - x2)
y_lo = sqrt0(r_min * r_min - x2)
# y r^k |_lo^up
# (actually only even ones are needed for "even", and only odd ones for "odd")
Dyr = np.outer(np.cumprod([1.0] + [r_max] * K), y_up) - \
np.outer(np.cumprod([1.0] + [r_min] * K), y_lo)
# ln(r + y) |_lo^up, only for odd k
if odd:
# ln x for x > 0, otherwise 0
def ln0(x): return np.log(x, np.zeros_like(x), where=x > 0)
Dlnry = ln0(r_max + y_up) - \
ln0(np.maximum(r_min, x) + y_lo)
# One-sided Abel integral \int_lo^up r^k dy.
def a(k):
odd_k = k % 2
# max. x power
K = k - odd_k # (k - 1 for odd k)
# generate coefficients for all x^m r^{k - m} terms
# (only even indices are actually used;
# for odd k, C[K] is also used for x^{k+1} ln(r + y))
C = [0] * (K + 1)
C[0] = 1 / (k + 1)
for m in range(k, 1, -2):
C[k - m + 2] = C[k - m] * m / (m - 1)
# sum all terms using Horner's method in x
a = C[K] * Dyr[k - K]
if odd_k:
a += C[K] * x2 * Dlnry
for m in range(K - 2, -1, -2):
a = a * x2 + C[m] * Dyr[k - m]
return a
# Generate the polynomial function
func = np.zeros(n)
span = slice(i_min, i_max)
# (using Horner's method)
func[span] = c[K]
for k in range(K - 1, -1, -1):
func[span] = func[span] * x[span] + c[k]
self.func = func
# Generate its Abel transform
abel = np.zeros(n)
span = slice(0, i_max)
if reduced:
c *= abel_scale
for k in range(K + 1):
if c[k]:
abel[span] += c[k] * 2 * a(k)
self.abel = abel
class PiecewisePolynomial(BasePolynomial):
r"""
Piecewise polynomial function (sum of :class:`Polynomial`\ s)
and its Abel transform.
Parameters
----------
r : numpy array
*r* values at which the function is generated
(and *x* values for its Abel transform)
ranges : iterable of unpackable
(list of tuples of) polynomial parameters for each piece::
[(r_min_1st, r_max_1st, c_1st),
(r_min_2nd, r_max_2nd, c_2nd),
...
(r_min_nth, r_max_nth, c_nth)]
according to ``Polynomial`` conventions.
All ranges are independent (may overlap and have gaps, may define
polynomials of any degrees) and may include optional ``Polynomial``
parameters
Attributes
----------
p : list of Polynomial
:class:`Polynomial` objects corresponding to each piece
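Example (a minimal sketch: two constant pieces on an assumed grid)::
    r = np.arange(50, dtype=float)
    pp = PiecewisePolynomial(r, [(0, 10, [1.0]),
                                 (10, 20, [0.5])])
    pp.abel   # Abel transform of the two-step profile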
"""
def __init__(self, r, ranges):
self.p = [Polynomial(r, *rng) for rng in ranges]
self.func = sum(p.func for p in self.p)
self.abel = sum(p.abel for p in self.p)
def copy(self):
"""
Make an independent copy.
"""
# make a basic copy with func and abel
other = super(type(self), self).copy()
# copy pieces also
other.p = [pn.copy() for pn in self.p]
return other
def __imul__(self, num):
"""
In-place multiplication: Polynomial *= num.
"""
# multiply func and abel
super(type(self), self).__imul__(num)
# multiply each piece also
for p in self.p:
p *= num
return self
class SPolynomial(BasePolynomial):
r"""
Bivariate polynomial function :math:`\sum_{mn} c_{mn} r^m \cos^n\theta` in
spherical coordinates and its Abel transform.
Parameters
----------
r, cos : numpy array
*r* and cos *θ* values at which the function is generated; *r* must be
non-negative. Arrays for generating a 2D image can be conveniently
prepared by the :func:`rcos` function. On the other hand, the radial
dependence alone (for a *single* cosine power) can be obtained by
supplying a 1D **r** array and a **cos** array filled with ones.
r_min, r_max : float
*r* domain: the function is defined as the polynomial on
[**r_min**, **r_max**] and zero outside it;
0 ≤ **r_min** < **r_max** ≲ **max r**
(**r_max** might exceed maximal **r**, but usually by < 1 pixel;
negative **r_min** or **r_max** are allowed for convenience but are
interpreted as 0)
c: 2D numpy array
polynomial coefficients for *r* and cos *θ* powers: ``c[m, n]`` is the
coefficient for the :math:`r^m \cos^n\theta` term. This array can be
conveniently constructed using :class:`Angular` tools.
r_0 : float, optional
*r* domain shift: the polynomial is defined in powers of
(*r* − **r_0**) instead of *r*
s : float, optional
*r* stretching factor (around **r_0**): the polynomial is defined in
powers of (*r* − **r_0**)/**s** instead of *r*
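Example (a minimal sketch; the grid size and coefficients are made up)::
    r, cos = rcos(shape=(65, 65))
    c = [1.0, 0, -1 / 900] * Angular.cos(2)  # (1 - (r/30)^2) cos^2(theta)
    p = SPolynomial(r, cos, 0, 30, c)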
"""
def __init__(self, r, cos, r_min, r_max, c, r_0=0.0, s=1.0):
if r.shape != cos.shape:
raise ValueError('Shapes of r and cos arrays must be equal.')
# trim negative r limits
if r_max <= 0:
# both func and abel must be zero everywhere
self.func = np.zeros_like(r)
self.abel = np.zeros_like(r)
return
if r_min < 0:
r_min = 0
c = np.array(c, dtype=float) # convert / make copy
if np.ndim(c) != 2:
raise ValueError('Coefficients array c must be 2-dimensional.')
# highest cos power with non-zero coefficient
N = c.nonzero()[1].max(initial=-1)
if N < 0: # all coefficients are zero
# so both func and abel are also zero everywhere
self.func = np.zeros_like(r)
self.abel = np.zeros_like(r)
return
# for each cos power: highest r power with non-zero coefficient
M = [a.nonzero()[0].max(initial=-1) for a in c.T]
if s != 1.0:
# apply stretch
S = np.cumprod([1.0] + [1.0 / s] * max(M)) # powers of 1/s
c *= np.array([S]).T
if r_0 != 0.0:
# apply shift
m = max(M)
P = pascal(1 + m, 'upper', False) # binomial coefficients
rm = np.cumprod([1.0] + [-float(r_0)] * m) # powers of -r_0
T = toeplitz([1.0] + [0.0] * m, rm) # upper-diag. (-r_0)^{i - j}
c = (P * T).dot(c)
rfull, cosfull = r, cos # (r and cos will be limited below)
# Generate the polynomial function
self.func = np.zeros_like(rfull)
# limit calculations to relevant domain (outside it func = 0)
dom = (r_min <= rfull) & (rfull < r_max)
r = rfull[dom]
cos = cosfull[dom]
# sum all non-zero terms using Horner's method
for n in range(N, -1, -1):
if n < N:
self.func[dom] *= cos
if M[n] < 0:
continue
p = np.full_like(r, c[M[n], n])
for m in range(M[n] - 1, -1, -1):
p *= r
if c[m, n]:
p += c[m, n]
self.func[dom] += p
# Generate its Abel transform
self.abel = np.zeros_like(rfull)
# relevant domain (outside it abel = 0)
# (excluding r = 0 to avoid singularities, see below)
dom = (0 < rfull) & (rfull < r_max)
r = rfull[dom]
cos = cosfull[dom]
# values at lower and upper integration limits
rho = [np.maximum(r, r_min),
r_max] # = max(r, r_max) within domain
z = [np.sqrt(rho[0]**2 - r**2),
np.sqrt(rho[1]**2 - r**2)]
f = [np.minimum(r / r_min, 1.0) if r_min else 1.0,
r / r_max] # = min(r/r_max, 1) within domain
# antiderivatives (recursive and used several times, thus cached)
@cache
def F(k, lim): # lim: 0 = lower limit, 1 = upper limit
if k < 0:
return (z[lim] * f[lim]**k - k * F(k + 2, lim)) / (1 - k)
if k == 0:
return z[lim]
if k == 1:
return r * np.log(z[lim] + rho[lim])
if k == 2:
return r * np.arccos(f[lim])
if k == 3: # (using explicit expression for higher efficiency)
return z[lim] * f[lim]
# k > 3: (in principle, k > 2)
k -= 2
return (z[lim] * f[lim]**k + (k - 1) * F(k, lim)) / k
# sum all non-zero terms using Horner's method
for n in range(N, -1, -1):
if n < N:
self.abel[dom] *= cos
if M[n] < 0:
continue
p = c[M[n], n] * 2 * (F(n - M[n], 1) - F(n - M[n], 0))
for m in range(M[n] - 1, -1, -1):
p *= r
if c[m, n]:
p += c[m, n] * 2 * (F(n - m, 1) - F(n - m, 0))
self.abel[dom] += p
# value at r = 0 (excluded above), nonzero only for n = 0
dom = np.where(rfull == 0)
for m in range(M[0] + 1):
k = m + 1
self.abel[dom] += c[m, 0] * 2 * (r_max**k - r_min**k) / k
# help garbage collector to release cache memory
F = None
class PiecewiseSPolynomial(BasePolynomial):
r"""
Piecewise bivariate polynomial function (sum of :class:`SPolynomial`\ s) in
spherical coordinates and its Abel transform.
Parameters
----------
r, cos : numpy array
*r* and cos *θ* values at which the function is generated
ranges : iterable of unpackable
(list of tuples of) polynomial parameters for each piece::
[(r_min_1st, r_max_1st, c_1st),
(r_min_2nd, r_max_2nd, c_2nd),
...
(r_min_nth, r_max_nth, c_nth)]
according to :class:`SPolynomial` conventions.
All ranges are independent (may overlap and have gaps, may define
polynomials of any degrees) and may include optional
:class:`SPolynomial` parameters (``r_0, s``).
"""
def __init__(self, r, cos, ranges):
for rng in ranges:
p = SPolynomial(r, cos, *rng)
try:
self.func += p.func
self.abel += p.abel
except AttributeError: # first range
self.func = p.func
self.abel = p.abel
def rcos(rows=None, cols=None, shape=None, origin=None):
r"""
Create arrays with polar coordinates :math:`r` and :math:`\cos\theta`:
either from a pair of Cartesian arrays (**rows**, **cols**) with row and
column values for each point *or* for a uniform grid with given dimensions
and origin (**shape**, **origin**).
Parameters
----------
rows, cols : numpy array
arrays with respectively row and column values for each point. Must
have identical shapes (the output arrays will have the same shape), but
might contain any values. For example, can be 2D arrays with integer
pixel coordinates, or with floating-point numbers for sampling at
subpixel points or on a distorted grid, or 1D arrays for sampling along
some curve.
shape : tuple of int
(rows, cols) -- create output arrays of given shape, with values
corresponding to a uniform pixel grid.
origin : tuple of float, optional
position of the origin (:math:`r = 0`) in the output array. By default,
the center of the array is used (center of the middle pixel for
odd-sized dimensions; even-sized dimensions will have a corresponding
half-pixel shift).
Returns
-------
r : numpy array
radii :math:`r = \sqrt{\text{row}^2 + \text{col}^2}` for each point
cos : numpy array
cosines of the polar angle :math:`\cos\theta = -\text{row}/r` for each
point (by convention, :math:`\cos\theta = 0` at :math:`r = 0`)
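Example (a minimal sketch for a small uniform grid)::
    r, cos = rcos(shape=(5, 5))                  # origin at the array center
    r, cos = rcos(shape=(5, 5), origin=(0, 2))   # origin at the top center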
"""
if rows is not None or cols is not None: # at least one array given
# sanity checks:
if rows is None or cols is None or \
shape is not None or origin is not None: # incompatible options
raise ValueError('Arguments must be either '
'two arrays rows and cols or '
'shape=<tuple> and optional origin=<tuple>.')
if rows.shape != cols.shape:
raise ValueError('Shapes of rows and cols arrays must be equal.')
else:
# create rows and cols arrays for given shape
rows, cols = np.mgrid[0.0:shape[0], 0.0:shape[1]]
# prepare origin
if origin is None: # default = midpoint
row = (shape[0] - 1) / 2
col = (shape[1] - 1) / 2
else:
row, col = origin
# to absolute coordinates
if row < 0:
row += shape[0]
if col < 0:
col += shape[1]
# shift "0" to origin
rows -= row
cols -= col
# radius
r = np.sqrt(rows**2 + cols**2)
# cosine
cos = -np.divide(rows, r, where=r != 0, out=np.zeros_like(r))
return r, cos
class Angular(object):
r"""
Class helping to define angular dependences for :class:`SPolynomial` and
:class:`PiecewiseSPolynomial`.
Supports arithmetic operations (addition, subtraction, multiplication of
objects; multiplication and division by numbers) and outer product with
radial coefficients (any list-like object). For example::
[3, 0, -1] * (Angular.cos(4) + Angular.sin(4) / 2)
represents :math:`(3 - r^2)\big(\cos^4\theta + (\sin^4\theta) / 2\big)`,
producing ::
[[ 1.5 0. -3. 0. 4.5]
[ 0. 0. -0. 0. 0. ]
[-0.5 0. 1. 0. -1.5]]
which can be supplied as the coefficient matrix to :class:`SPolynomial`.
Likewise, a list of ranges for :class:`PiecewiseSPolynomial` can be
prepared as an outer product with a list of ``(r_min, r_max, coeffs)``
tuples (with optional other :class:`SPolynomial` parameters), where 1D
``coeffs`` contain radial coefficients for a polynomial segment.
Parameters
----------
c : float or iterable of float
list of coefficients: ``Angular([c₀, c₁, c₂, ...])`` means
:math:`c_0 \cos^0\theta + c_1 \cos^1\theta + c_2 \cos^2\theta + \dots`;
``Angular(a)`` represents the isotropic distribution a⋅cos⁰ *θ*
Attributes
----------
c : numpy array
coefficients for :math:`\cos^n\theta` powers, passed at instantiation
directly (see above) or converted from other representations by the
methods below.
"""
def __init__(self, c):
"""
Weighted sum of cosine powers.
"""
self.c = np.ravel(c).astype(float)
@classmethod
def cos(cls, n):
r"""
Cosine power: ``Angular.cos(n)`` means :math:`\cos^n\theta`.
"""
if not isinstance(n, int) or n < 0:
raise ValueError('Power must be positive integer.')
return cls([0] * n + [1])
@classmethod
def sin(cls, n):
r"""
Sine power: ``Angular.sin(n)`` means :math:`\sin^n\theta`
(*n* must be even).
"""
return cls.cossin(0, n)
@classmethod
def cossin(cls, m, n):
r"""
Product of cosine and sine powers: ``Angular.cossin(m, n)`` means
:math:`\cos^m\theta \cdot \sin^n\theta` (*n* must be even).
"""
if not isinstance(m, int) or m < 0:
raise ValueError('Cosine power must be positive integer.')
if not isinstance(n, int) or n < 0 or n % 2:
raise ValueError('Sine power must be even positive integer.')
c = np.zeros(1 + m + n)
# binomial coefficients of (1 - cos^2)^(n/2)
c[m::2] = invpascal(1 + n // 2, 'lower', False)[-1, ::-1]
return cls(c)
@classmethod
def legendre(cls, c):
r"""
Weighted sum of Legendre polynomials in cos *θ*:
``Angular.legendre([c₀, c₁, c₂, ...])`` means
:math:`c_0 P_0(\cos\theta) + c_1 P_1(\cos\theta) + c_2 P_2(\cos\theta)
+ \dots`
This method is intended to be called like ::
Angular.legendre([1, β₁, β₂, ...])
where :math:`\beta_i` are so-called anisotropy parameters. However, if
you really need a single polynomial :math:`P_n(\cos\theta)`, this can
be easily achieved by ::
Angular.legendre([0] * n + [1])
"""
C = np.zeros_like(c, dtype=float)
for n, a in enumerate(c):
C[n::-2] += a * legendre(n).c[::2]
# (SciPy's legendre() has backwards order and produces noise in
# coefficients that must be zero, so indexing takes care of this)
return cls(C)
# disable NumPy "ufunc" handling, which makes no sense here
# and interferes with the overloaded multiplication operator
__array_ufunc__ = None
def __add__(self, other):
"""
Sum of two objects (might have different sizes).
"""
a, b, = sorted([self.c, other.c], key=len)
c = b.copy() # copy the longer array
c[:len(a)] += a # add the shorter array to the relevant part
return Angular(c)
def __sub__(self, other):
"""
Difference of two objects (might have different orders).
"""
a, b, = sorted([self.c, other.c], key=len)
c = b.copy() # copy the longer array
c[:len(a)] -= a # subtract the shorter array from the relevant part
return Angular(c)
def __mul__(self, obj):
"""
Multiplication by number or another angular dependence: return the
resulting angular dependence.
Outer product of radial and angular coefficients: return 2D array with
rows corresponding to powers of *r* and columns to powers of cos *θ*.
"""
if isinstance(obj, Angular): # by another angular dependence
return Angular(np.convolve(self.c, obj.c))
if np.isscalar(obj): # by number
return Angular(obj * self.c)
try: # piecewise ranges
ranges = []
for rng in obj:
r_min, r_max, c = rng[:3]
c = np.outer(np.ravel(c), self.c)
rng = (r_min, r_max, c) + rng[3:]
ranges.append(rng)
return ranges
except TypeError: # otherwise -- by radial coefficients
return np.outer(np.ravel(obj), self.c)
__rmul__ = __mul__
__rmul__.__doc__ = __mul__.__doc__
def __truediv__(self, num):
"""
Division by number.
"""
return Angular(self.c / num)
def __repr__(self):
return str(self.c) + '.[cos^n]'
class ApproxGaussian(object):
r"""
Piecewise quadratic approximation (non-negative and continuous but not
exactly smooth) of the unit-amplitude, unit-SD Gaussian function
:math:`\exp(-r^2/2)`, equal to it at endpoints and midpoint of each piece.
The forward Abel transform of this approximation will typically have a
better relative accuracy than the approximation itself.
Parameters
----------
tol : float
absolute approximation tolerance (maximal deviation).
Some reference values yielding the best accuracy for certain number of
segments:
.. table::
:widths: auto
======= =========== ===========
**tol** Better than Segments
======= =========== ===========
3.7e-2 5% 3
1.4e-2 2% 5
4.8e-3 0.5% 7 (default)
0.86e-3 0.1% 13
0.99e-4 0.01% 27
0.95e-5 0.001% 59
======= =========== ===========
Attributes
----------
ranges : lists of tuple
list of parameters ``(r_min, r_max, [c₀, c₁, c₂], r_0, s)`` that can be
passed directly to :class:`PiecewisePolynomial` or, after
“multiplication” by :class:`Angular`, to :class:`PiecewiseSPolynomial`
norm : float
the integral :math:`\int_{-\infty}^{+\infty} f(r)\,dr` for
normalization (equals :math:`\sqrt{2\pi}` for the ideal Gaussian
function, but slightly differs for the approximation)
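Example (a minimal sketch; the peak position and width are made up)::
    r = np.arange(100, dtype=float)
    approx = ApproxGaussian()  # default tolerance, 7 segments
    peak = PiecewisePolynomial(r, approx.scaled(A=1.0, r_0=40.0, sigma=5.0))
    peak.abel   # forward Abel transform of the approximated Gaussian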
"""
def __init__(self, tol=4.8e-3):
# Reference Gaussian function.
def g(x):
return np.exp(-x**2 / 2)
# Determine splitting nodes
xs = [] # node positions
# first (max. x) point: g(x) = tol / 2
x1 = np.sqrt(-2 * np.log(tol / 2))
# moving towards x = 0...
while x1 > 0:
xs.append(x1)
# Find next point x2 such that max. deviation on [x2, x1] is <= tol
# (SciPy tools don't like this problem, so solving it manually...)
# 3rd derivative to estimate max. deviation
derx1 = np.abs(3 - x1**2) * x1 * g(x1) # at x1
# constant factor for max. of cubic Taylor term
M = 1 / (72 * np.sqrt(3))
# max. among mid- and endpoints
def der(x2):
xc = (x1 + x2) / 2
return M * max(derx1,
np.abs(3 - xc**2) * xc * g(xc),
np.abs(3 - x2**2) * x2 * g(x2))
# estimator of max. deviation
def dev(x2):
return der(x2) * (x1 - x2)**3
x2low, x2 = x1, x1 # initialize root interval
devx2 = 0
for i in range(100): # (for safety; in fact, takes ≲20 iterations)
if devx2 > tol: # switch to binary search (more stable)
xc = (x2low + x2) / 2
if dev(xc) > tol:
x2 = xc
else:
x2low = xc
else:
x2low = x2
# estimate (x2 - x1) from ~cubic deviation growth
dx = (tol / der(x2))**(1/3)
if x2 == x1: # for 1st estimate:
dx /= 2 # carefully go only 1/2 as far
x2 = max(x1 - dx, 0) # shouldn't go beyond 0
devx2 = dev(x2)
# terminate on root uncertainty (tol is more than enough)
if x2low - x2 < tol:
break
# make sure that outer parabola doesn't go below 0
if len(xs) == 1 and g(x2) > 4 * g((x1 + x2) / 2):
# use node point that matches the limiting parabola (x - x1)^2
x2 = (x1 + 2 * np.sqrt(x1**2 - 6 * np.log(4))) / 3
# move to next segment
x1 = x2
# add x = 0 to split central (last) segment if its max. deviation is
# too large (estimated from quartic term)
zero = (1 + g(xs[-1])) / 2 - g(xs[-1] / np.sqrt(2)) > tol
# symmetric copy to negative x
if zero:
xs = [-x for x in xs] + [0.0] + xs[::-1]
else:
xs = [-x for x in xs] + xs[::-1]
N = len(xs) # total number of nodes
xs = np.array(xs)
# midpoints positions
xc = (xs[:-1] + xs[1:]) / 2
# values at nodes and midpoints
ys = g(xs)
ys[0] = ys[-1] = 0 # zero at endpoints
yc = g(xc)
# Create polynomial parameters
cs = [ys[:-1],
-3 * ys[:-1] + 4 * yc - ys[1:],
2 * (ys[:-1] - 2 * yc + ys[1:])]
self.ranges = []
for i in range(N - 1):
self.ranges.append((xs[i], xs[i + 1], # r_min, r_max
[cs[0][i], cs[1][i], cs[2][i]], # c
xs[i], xs[i + 1] - xs[i])) # r_0, s
# calculate norm
self.norm = ((cs[0] + cs[1] / 2 + cs[2] / 3) * np.diff(xs)).sum()
def scaled(self, A=1.0, r_0=0.0, sigma=1.0):
r"""
Parameters for piecewise polynomials corresponding to the shifted and
scaled Gaussian function
:math:`A \exp\big([(r - r_0)/\sigma]^2 / 2\big)`.
(Useful numbers: a Gaussian normalized to unit integral, that is the
standard normal distribution, has :math:`A = 1/\sqrt{2\pi}`; however,
see :attr:`norm` above. A Gaussian with FWHM = 1 has :math:`\sigma =
1/\sqrt{8\ln2}`.)
Parameters
----------
A : float
amplitude
r_0 : float
peak position
sigma : float
standard deviation
Returns
-------
ranges : list of tuple
parameters for the piecewise polynomial approximating the shifted
and scaled Gaussian
"""
ranges = []
for r_min, r_max, c, r, s in self.ranges:
r_max = r_0 + sigma * r_max
if r_max < 0:
continue
r_min = max(0, r_0 + sigma * r_min)
c = [A * cn for cn in c]
r = r_0 + sigma * r
s *= sigma
ranges.append((r_min, r_max, c, r, s))
return ranges
def bspline(spl):
"""
Convert SciPy B-spline to :class:`PiecewisePolynomial` parameters.
Parameters
----------
spl : tuple or BSpline or UnivariateSpline
``scipy.interpolate`` B-spline representation, such as ``splrep()``
results, ``BSpline`` object (result of ``make_interp_spline()``, for
example) or ``UnivariateSpline`` object
Returns
-------
ranges : list of tuple
list of parameters ``(r_min, r_max, coeffs, r_0)`` that can be passed
directly to :class:`PiecewisePolynomial` or, after “multiplication” by
:class:`Angular`, to :class:`PiecewiseSPolynomial`
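Example (a minimal sketch; the sampled curve is made up)::
    from scipy.interpolate import splrep
    x = np.linspace(0, 10, 50)
    spl = splrep(x, np.exp(-x))
    pp = PiecewisePolynomial(x, bspline(spl))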
"""
if isinstance(spl, UnivariateSpline):
# extract necessary data, convert to compatible format
knots = spl.get_knots()
coeffs = spl.get_coeffs()
k = len(coeffs) - len(knots) + 1
knots = np.pad(knots, k, 'edge')
spl = (knots, coeffs, k)
# convert B-spline representation to piecewise polynomial representation
ppoly = PPoly.from_spline(spl)
x = ppoly.x # breakpoints
c = ppoly.c.T[:, ::-1] # coefficients (PPoly degrees are descending)
# convert to PiecewisePolynomial ranges
ranges = []
for i in range(len(x) - 1):
r_min, r_max = x[i], x[i + 1]
if r_min != r_max: # (some PPoly intervals are degenerate)
ranges.append((r_min, r_max, c[i], r_min))
return ranges
|
PyAbel/PyAbel
|
abel/tools/polynomial.py
|
Python
|
mit
| 32,017
|
[
"Gaussian"
] |
279d745c1114ad7a96a6461645aacd87ee7e969d80b1066d12dd1a5927805294
|
#!/usr/bin/env python
"""Read in two csv files and check that the float contents are the same"""
from __future__ import print_function
import sys
from termcolor import colored
from openpyxl import load_workbook
global threshold
def main():
# Start processing the directories
if len(sys.argv) <= 1 :
print('checkexcel file1 file2 [-thresh 1.0E-3] [-f]', file=sys.stderr)
print(' Compare the two excel files for any significant changes', file=sys.stderr)
print(' Numbers f1 and f2 in each file are compared', file=sys.stderr)
print(' an error is flagged if 2*abs(f1-f2)/(f1+f2+2) > threshold', file=sys.stderr)
print(' The default value for threshold is 1.0E-3 ', file=sys.stderr)
print(' -f forces a full comparison of the sheets ', file=sys.stderr)
print(' by default Settings is not included ', file=sys.stderr)
exit()
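# Example invocation (hypothetical file names):
#   python checkexcel.py results.xlsx reference.xlsx -thresh 1.0E-4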
threshold = 1.0E-3
separator = None
tokens = sys.argv[1:]
ntokens = len(tokens)-1
itoken = -1
files = []
full = False
while itoken < ntokens:
itoken += 1
token = tokens[itoken]
if token == '-thresh':
itoken +=1
threshold = float(tokens[itoken])
elif token == '-f':
full = True
else:
files.append(tokens[itoken])
# end if
# end while
file1 = files[0]
file2 = files[1]
#
# Open the work books
#
wb1 = load_workbook(filename=file1, read_only=True)
wb2 = load_workbook(filename=file2, read_only=True)
#
# Initialise variables
#
error = None
nerrors = 0
max_percentage_error = 0.0
row = 0
col = 0
sheet = ''
file1 = ''
value1 = 0
file2 = ''
value2 = 0
#
# Loop over sheets
#
sheets = ['Main','Scenarios','Powder Molar Absorption (cells)','Powder Absorption','Powder Real Permittivity','Powder Imaginary Permittivity', 'Powder ATR Reflectance', 'Analysis','Crystal R_p','Crystal R_s','Crystal T_p','Crystal T_s','Real Crystal Permittivity','Imag Crystal Permittivity']
if full:
sheets.append('Settings')
for sheet in sheets:
if not sheet in wb1 :
continue
if not sheet in wb2 :
continue
print('Checking sheet ',sheet)
ws1 = wb1[sheet]
ws2 = wb2[sheet]
max_rows1 = ws1.max_row
max_rows2 = ws2.max_row
max_columns1 = ws1.max_column
max_columns2 = ws2.max_column
if max_rows1 != max_rows2:
print('Error - the number of rows in the sheet is not the same',sheet,max_rows1,max_rows2)
nerrors += 1
continue
if max_columns1 != max_columns2:
print('Error - the number of columns in the sheet is not the same',sheet,max_columns1,max_columns2)
nerrors += 1
continue
#
# Loop over rows
#
row_index = 0
for row1,row2 in zip(ws1.rows, ws2.rows):
row_index += 1
col_index = 0
#
# Loop over cells
#
for cell1,cell2, in zip(row1,row2):
col_index += 1
value1 = cell1.value
value2 = cell2.value
if cell1 is None and cell2 is None:
pass
elif cell1 is None or cell2 is None:
nerrors += 1
percentage_error = 0.0
error = (sheet, row_index, col_index, value1, value2, percentage_error)
else:
if value1 != value2:
if cell1.data_type == 'n' and cell2.data_type == 'n':
#
# Flag an error which is numeric
#
percentage_error = 100.0*abs(2.0*(value1 - value2)/(abs(value1) + abs(value2)+2))
if percentage_error > 100.*threshold:
nerrors += 1
if percentage_error > max_percentage_error:
max_percentage_error = percentage_error
error = (sheet, row_index, col_index, value1, value2, percentage_error)
# if percentage error
else:
if percentage_error > max_percentage_error:
max_percentage_error = percentage_error
# if percentage error > threshold
else:
#
# This is a non-numeric error
#
nerrors += 1
percentage_error = 0.0
error = (sheet, row_index, col_index, value1, value2, percentage_error)
# if cell1.data_type
# if value1 != value2
# if cell1 is none
# for cell1, cell2
# for rowq, row
# for sheet
if error is not None:
sheet,row,col,value1,value2,max_percentage_error = error
print(' '+colored('ERRORS:','red')+'({}) LARGEST ON ROW,COL {},{} OF SHEET {}, {}({}) and {}({}) -- max %error={}'.format(nerrors, row,col,sheet,file1,value1,file2,value2,max_percentage_error))
elif nerrors > 0:
print(' '+colored('ERRORS:','red')+'({}) Dimensions of spreadsheet were wrong '.format(nerrors))
else:
print(' '+colored('OK:','blue')+" {} = {} -- max %error={}" .format(file1,file2,max_percentage_error))
return nerrors, row,col,sheet,file1,value1,file2,value2,max_percentage_error
if __name__=="__main__":
main()
|
JohnKendrick/PDielec
|
PDielec/checkexcel.py
|
Python
|
mit
| 5,880
|
[
"CRYSTAL"
] |
88d28f07514433153d1fece7211a19028036da5426541fbc6ff663d6e727bdb3
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide handling for persisting OpenLP settings. OpenLP uses QSettings to manage settings persistence. QSettings
provides a single API for saving and retrieving settings from the application but writes to disk in an OS dependent
format.
"""
import os
from openlp.core.utils import AppLocation
class SettingsManager(object):
"""
Class to provide helper functions for the loading and saving of application settings.
"""
@staticmethod
def get_files(section=None, extension=None):
"""
Get a list of files from the data files path.
``section``
Defaults to *None*. The section of code getting the files - used to load from a section's data subdirectory.
``extension``
Defaults to *None*. The extension to search for.
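For example, listing all PNG files of a hypothetical "images" section::
    SettingsManager.get_files('images', '.png')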
"""
path = AppLocation.get_data_path()
if section:
path = os.path.join(path, section)
try:
files = os.listdir(path)
except OSError:
return []
if extension:
return [filename for filename in files if extension == os.path.splitext(filename)[1]]
else:
# no filtering required
return files
|
marmyshev/transitions
|
openlp/core/lib/settingsmanager.py
|
Python
|
gpl-2.0
| 3,318
|
[
"Brian"
] |
545cb1e53fce011d616b0022a6e8a982c351c731119c7f645554cd6c0100947c
|
##
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Molpro, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import sys
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file
from easybuild.tools.run import run_cmd
class EB_Molpro(ConfigureMake):
"""Support for building and installing Molpro."""
def __init__(self, *args, **kwargs):
"""Easyblock constructor, initialize class variables specific to Molpro and check on license token."""
super(EB_Molpro, self).__init__(*args, **kwargs)
self.full_prefix = '' # no None, to make easyblock compatible with --module-only
self.orig_launcher = None
self.cleanup_token_symlink = False
self.license_token = os.path.join(os.path.expanduser('~'), '.molpro', 'token')
def configure_step(self):
"""Custom configuration procedure for Molpro: use 'configure -batch'."""
if not os.path.isfile(self.license_token):
if self.cfg['license_file'] is not None and os.path.isfile(self.cfg['license_file']):
# put symlink in place to specified license file in $HOME/.molpro/token
# other approaches (like defining $MOLPRO_KEY) don't seem to work
self.cleanup_token_symlink = True
mkdir(os.path.dirname(self.license_token))
try:
os.symlink(self.cfg['license_file'], self.license_token)
self.log.debug("Symlinked %s to %s", self.cfg['license_file'], self.license_token)
except OSError, err:
raise EasyBuildError("Failed to create symlink for license token at %s", self.license_token)
else:
# no license token available, so fail early
raise EasyBuildError("No license token found at either %s or via 'license_file'", self.license_token)
# installation prefix
self.cfg.update('configopts', "-prefix %s" % self.installdir)
# compilers & MPI
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', "-%s -%s" % (os.environ['CC_SEQ'], os.environ['F90_SEQ']))
if 'MPI_INC_DIR' in os.environ:
self.cfg.update('configopts', "-mpp -mppbase %s" % os.environ['MPI_INC_DIR'])
else:
raise EasyBuildError("$MPI_INC_DIR not defined")
else:
self.cfg.update('configopts', "-%s -%s" % (os.environ['CC'], os.environ['F90']))
# BLAS/LAPACK
if 'BLAS_LIB_DIR' in os.environ:
self.cfg.update('configopts', "-blas -blaspath %s" % os.environ['BLAS_LIB_DIR'])
else:
raise EasyBuildError("$BLAS_LIB_DIR not defined")
if 'LAPACK_LIB_DIR' in os.environ:
self.cfg.update('configopts', "-lapack -lapackpath %s" % os.environ['LAPACK_LIB_DIR'])
else:
raise EasyBuildError("$LAPACK_LIB_DIR not defined")
# 32 vs 64 bit
if self.toolchain.options.get('32bit', None):
self.cfg.update('configopts', '-i4')
else:
self.cfg.update('configopts', '-i8')
run_cmd("./configure -batch %s" % self.cfg['configopts'])
cfgfile = os.path.join(self.cfg['start_dir'], 'CONFIG')
cfgtxt = read_file(cfgfile)
# determine original LAUNCHER value
launcher_regex = re.compile('^LAUNCHER=(.*)$', re.M)
res = launcher_regex.search(cfgtxt)
if res:
self.orig_launcher = res.group(1)
self.log.debug("Found original value for LAUNCHER: %s", self.orig_launcher)
else:
raise EasyBuildError("Failed to determine LAUNCHER value")
# determine full installation prefix
prefix_regex = re.compile('^PREFIX=(.*)$', re.M)
res = prefix_regex.search(cfgtxt)
if res:
self.full_prefix = res.group(1)
self.log.debug("Found full installation prefix: %s", self.full_prefix)
else:
raise EasyBuildError("Failed to determine full installation prefix")
# determine MPI launcher command that can be used during build/test
# obtain command with specific number of cores (required by mpi_cmd_for), then replace that number with '%n'
launcher = self.toolchain.mpi_cmd_for('%x', self.cfg['parallel'])
launcher = launcher.replace(' %s' % self.cfg['parallel'], ' %n')
# patch CONFIG file to change LAUNCHER definition, in order to avoid having to start mpd
for line in fileinput.input(cfgfile, inplace=1, backup='.orig'):
line = re.sub(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % launcher, line)
sys.stdout.write(line)
# reread CONFIG and log contents
cfgtxt = read_file(cfgfile)
self.log.info("Contents of CONFIG file:\n%s", cfgtxt)
def test_step(self):
"""Custom test procedure for Molpro: make quicktest, make test."""
# check 'main routes' only
run_cmd("make quicktest")
# extensive test
run_cmd("make MOLPRO_OPTIONS='-n%s' test" % self.cfg['parallel'])
def install_step(self):
"""
Custom install procedure for Molpro:
* put license token in place in $installdir/.token
* run 'make tuning'
* install with 'make install'
"""
run_cmd("make tuning")
super(EB_Molpro, self).install_step()
# put original LAUNCHER definition back in place in bin/molpro that got installed,
# since the value used during installation point to temporary files
for line in fileinput.input(os.path.join(self.full_prefix, 'bin', 'molpro'), inplace=1):
line = re.sub(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % self.orig_launcher, line)
sys.stdout.write(line)
if self.cleanup_token_symlink:
try:
os.remove(self.license_token)
self.log.debug("Symlink to license token %s removed", self.license_token)
except OSError, err:
raise EasyBuildError("Failed to remove %s: %s", self.license_token, err)
def make_module_req_guess(self):
"""Customize $PATH guesses for Molpro module."""
guesses = super(EB_Molpro, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(os.path.basename(self.full_prefix), x) for x in ['bin', 'utilities']],
})
return guesses
def sanity_check_step(self):
"""Custom sanity check for Molpro."""
prefix_subdir = os.path.basename(self.full_prefix)
custom_paths = {
'files': [os.path.join(prefix_subdir, x) for x in ['bin/molpro.exe', 'bin/molpro', 'lib/.token']],
'dirs': [os.path.join(prefix_subdir, x) for x in ['doc', 'examples', 'utilities']],
}
super(EB_Molpro, self).sanity_check_step(custom_paths=custom_paths)
|
torbjoernk/easybuild-easyblocks
|
easybuild/easyblocks/m/molpro.py
|
Python
|
gpl-2.0
| 8,111
|
[
"Molpro"
] |
98e396349909deb5f6b9a0639582f8f169fbae186d32d417fe2e3dfd8b1d4023
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import inspect
import os
import sys
import numpy as np
import numpy.testing as npt
import pandas.util.testing as pdt
from scipy.spatial.distance import pdist
from ._decorator import experimental
class ReallyEqualMixin:
"""Use this for testing __eq__/__ne__.
Taken and modified from the following public domain code:
https://ludios.org/testing-your-eq-ne-cmp/
"""
def assertReallyEqual(self, a, b):
# assertEqual first, because it will have a good message if the
# assertion fails.
self.assertEqual(a, b)
self.assertEqual(b, a)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def assertReallyNotEqual(self, a, b):
# assertNotEqual first, because it will have a good message if the
# assertion fails.
self.assertNotEqual(a, b)
self.assertNotEqual(b, a)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
@experimental(as_of="0.4.0")
def get_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and its second element is the
# caller's filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
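# A minimal usage sketch (file name is hypothetical; the path is resolved
# relative to the module that calls this function):
#
#     fp = get_data_path('example.txt')
#     # fp == '/abs/path/to/calling/module/data/example.txt'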
@experimental(as_of="0.4.0")
def assert_ordination_results_equal(left, right, ignore_method_names=False,
ignore_axis_labels=False,
ignore_directionality=False,
decimal=7):
"""Assert that ordination results objects are equal.
This is a helper function intended to be used in unit tests that need to
compare ``OrdinationResults`` objects.
Parameters
----------
left, right : OrdinationResults
Ordination results to be compared for equality.
ignore_method_names : bool, optional
Ignore differences in `short_method_name` and `long_method_name`.
ignore_axis_labels : bool, optional
Ignore differences in axis labels (i.e., column labels).
ignore_directionality : bool, optional
Ignore differences in directionality (i.e., differences in signs) for
attributes `samples`, `features` and `biplot_scores`.
Raises
------
AssertionError
If the two objects are not equal.
"""
npt.assert_equal(type(left) is type(right), True)
if not ignore_method_names:
npt.assert_equal(left.short_method_name, right.short_method_name)
npt.assert_equal(left.long_method_name, right.long_method_name)
_assert_frame_dists_equal(left.samples, right.samples,
ignore_columns=ignore_axis_labels,
ignore_directionality=ignore_directionality,
decimal=decimal)
_assert_frame_dists_equal(left.features, right.features,
ignore_columns=ignore_axis_labels,
ignore_directionality=ignore_directionality,
decimal=decimal)
_assert_frame_dists_equal(left.biplot_scores, right.biplot_scores,
ignore_columns=ignore_axis_labels,
ignore_directionality=ignore_directionality,
decimal=decimal)
_assert_frame_dists_equal(left.sample_constraints,
right.sample_constraints,
ignore_columns=ignore_axis_labels,
ignore_directionality=ignore_directionality,
decimal=decimal)
_assert_series_equal(left.eigvals, right.eigvals, ignore_axis_labels,
decimal=decimal)
_assert_series_equal(left.proportion_explained, right.proportion_explained,
ignore_axis_labels,
decimal=decimal)
def _assert_series_equal(left_s, right_s, ignore_index=False, decimal=7):
# assert_series_equal doesn't like None...
if left_s is None or right_s is None:
assert left_s is None and right_s is None
else:
npt.assert_almost_equal(left_s.values, right_s.values,
decimal=decimal)
if not ignore_index:
pdt.assert_index_equal(left_s.index, right_s.index)
def _assert_frame_dists_equal(left_df, right_df, ignore_index=False,
ignore_columns=False,
ignore_directionality=False, decimal=7):
if left_df is None or right_df is None:
assert left_df is None and right_df is None
else:
left_values = left_df.values
right_values = right_df.values
left_dists = pdist(left_values)
right_dists = pdist(right_values)
npt.assert_almost_equal(left_dists, right_dists, decimal=decimal)
if not ignore_index:
pdt.assert_index_equal(left_df.index, right_df.index)
if not ignore_columns:
pdt.assert_index_equal(left_df.columns, right_df.columns)
def _assert_frame_equal(left_df, right_df, ignore_index=False,
ignore_columns=False, ignore_directionality=False,
decimal=7):
# assert_frame_equal doesn't like None...
if left_df is None or right_df is None:
assert left_df is None and right_df is None
else:
left_values = left_df.values
right_values = right_df.values
if ignore_directionality:
left_values, right_values = _normalize_signs(left_values,
right_values)
npt.assert_almost_equal(left_values, right_values, decimal=decimal)
if not ignore_index:
pdt.assert_index_equal(left_df.index, right_df.index)
if not ignore_columns:
pdt.assert_index_equal(left_df.columns, right_df.columns)
def _normalize_signs(arr1, arr2):
"""Change column signs so that "column" and "-column" compare equal.
This is needed because results of eigenproblems can have signs
flipped, but they're still right.
Notes
-----
This function tries hard to make sure that, if you find "column"
and "-column" almost equal, calling a function like np.allclose to
compare them after calling `_normalize_signs` succeeds.
To do so, it distinguishes two cases for every column:
- It can be all almost equal to 0 (this includes a column of
zeros).
- Otherwise, it has a value that isn't close to 0.
In the first case, no sign needs to be flipped. I.e., for
|epsilon| small, np.allclose(-epsilon, 0) is true if and only if
np.allclose(epsilon, 0) is.
In the second case, the function finds the number in the column
whose absolute value is largest. Then, it compares its sign with
the number found in the same index, but in the other array, and
flips the sign of the column as needed.
"""
# Let's convert everything to floating point numbers (it's
# reasonable to assume that eigenvectors will already be floating
# point numbers). This is necessary because np.array(1) /
# np.array(0) != np.array(1.) / np.array(0.)
arr1 = np.asarray(arr1, dtype=np.float64)
arr2 = np.asarray(arr2, dtype=np.float64)
if arr1.shape != arr2.shape:
raise ValueError(
"Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
arr2.shape)
)
# To avoid issues around zero, we'll compare signs of the values
# with highest absolute value
max_idx = np.abs(arr1).argmax(axis=0)
max_arr1 = arr1[max_idx, range(arr1.shape[1])]
max_arr2 = arr2[max_idx, range(arr2.shape[1])]
sign_arr1 = np.sign(max_arr1)
sign_arr2 = np.sign(max_arr2)
# Store current warnings, and ignore division by zero (like 1. /
# 0.) and invalid operations (like 0. / 0.)
wrn = np.seterr(invalid='ignore', divide='ignore')
differences = sign_arr1 / sign_arr2
# The values in `differences` can be:
# 1 -> equal signs
# -1 -> diff signs
# Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
np.seterr(**wrn)
# Now let's deal with cases where `differences != \pm 1`
special_cases = (~np.isfinite(differences)) | (differences == 0)
# In any of these cases, the sign of the column doesn't matter, so
# let's just keep it
differences[special_cases] = 1
return arr1 * differences, arr2
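# Tiny worked example (illustrative values): for a column [1, -2] in arr1 and
# [-1, 2] in arr2, the entries with the largest absolute value have opposite
# signs, so that column of arr1 is multiplied by -1 and the two arrays then
# compare equal with np.allclose. Columns that are all (near) zero are left
# untouched.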
@experimental(as_of="0.4.0")
def assert_data_frame_almost_equal(left, right):
"""Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
are considered "almost equal" if they are within a threshold defined by
``assert_frame_equal``. This wrapper uses a number of
checks that are turned off by default in ``assert_frame_equal`` in order to
perform stricter comparisons (for example, ensuring the index and column
types are the same). It also does not consider empty ``pd.DataFrame``
objects equal if they have a different index.
Other notes:
* Index (row) and column ordering must be the same for objects to be equal.
* NaNs (``np.nan``) in the same locations are considered equal.
This is a helper function intended to be used in unit tests that need to
compare ``pd.DataFrame`` objects.
Parameters
----------
left, right : pd.DataFrame
``pd.DataFrame`` objects to compare.
Raises
------
AssertionError
If `left` and `right` are not "almost equal".
See Also
--------
pandas.util.testing.assert_frame_equal
"""
# pass all kwargs to ensure this function has consistent behavior even if
# `assert_frame_equal`'s defaults change
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
# this check ensures that empty DataFrames with different indices do not
# compare equal. exact=True specifies that the type of the indices must be
# exactly the same
assert_index_equal(left.index, right.index)
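# A minimal usage sketch (frames are hypothetical, shown for illustration):
#
#     df1 = pd.DataFrame({'a': [1.0, 2.0]})
#     df2 = pd.DataFrame({'a': [1.0, 2.0 + 1e-9]})
#     assert_data_frame_almost_equal(df1, df2)  # passes
#     assert_data_frame_almost_equal(df1, df2.rename(columns={'a': 'b'}))  # raises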
def assert_series_almost_equal(left, right):
# pass all kwargs to ensure this function has consistent behavior even if
# `assert_series_equal`'s defaults change
pdt.assert_series_equal(left, right,
check_dtype=True,
check_index_type=True,
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
obj='Series')
# this check ensures that empty Series with different indices do not
# compare equal.
assert_index_equal(left.index, right.index)
def assert_index_equal(a, b):
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def pytestrunner():
try:
import numpy
try:
# NumPy 1.14 changed repr output breaking our doctests,
# request the legacy 1.13 style
numpy.set_printoptions(legacy="1.13")
except TypeError:
# Old Numpy, output should be fine as it is :)
# TypeError: set_printoptions() got an unexpected
# keyword argument 'legacy'
pass
except ImportError:
numpy = None
try:
import pandas
# Max columns is automatically set by pandas based on terminal
# width, so set columns to unlimited to prevent the test suite
# from passing/failing based on terminal size.
pandas.options.display.max_columns = None
except ImportError:
pandas = None
# import here, because outside of this function the eggs aren't loaded
import pytest
args = ['--pyargs', 'skbio', '--doctest-modules', '--doctest-glob',
'*.pyx', '-o', '"doctest_optionflags=NORMALIZE_WHITESPACE'
' IGNORE_EXCEPTION_DETAIL"'] + sys.argv[1:]
errno = pytest.main(args=args)
sys.exit(errno)
|
gregcaporaso/scikit-bio
|
skbio/util/_testing.py
|
Python
|
bsd-3-clause
| 13,826
|
[
"scikit-bio"
] |
c6b40e0254ac827ac30f0ea1c76d4fd749a7c22aa1faea9f98e78934457206ba
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# $Id$
#
"""
Display all events on a particular day.
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc, SimpleTable
from gramps.gui.plug.quick import QuickTable
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from gramps.gen.lib import Date
def get_ref(db, objclass, handle):
"""
Looks up object in database
"""
if objclass == 'Person':
ref = db.get_person_from_handle(handle)
elif objclass == 'Family':
ref = db.get_family_from_handle(handle)
elif objclass == 'Event':
ref = db.get_event_from_handle(handle)
elif objclass == 'Source':
ref = db.get_source_from_handle(handle)
elif objclass == 'Place':
ref = db.get_place_from_handle(handle)
elif objclass == 'Repository':
ref = db.get_repository_from_handle(handle)
else:
ref = objclass
return ref
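# Hedged example of the dispatch above (handle value is hypothetical):
#
#     person = get_ref(db, 'Person', 'abcd1234')    # -> Person object
#     other = get_ref(db, 'Citation', 'abcd1234')   # unhandled class: returned as-is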
def run(database, document, main_event):
"""
Display all events that occurred on the date of the given event (or on the given date).
Takes an Event or Date object.
"""
if isinstance(main_event, Date):
main_date = main_event
else:
main_date = main_event.get_date_object()
cal = main_date.get_calendar()
# setup the simple access functions
sdb = SimpleAccess(database)
sdoc = SimpleDoc(document)
stab = QuickTable(sdb)
stab.set_link_col(3)
yeartab = QuickTable(sdb)
yeartab.set_link_col(3)
histab = QuickTable(sdb)
histab.set_link_col(3)
# display the title
sdoc.title(_("Events of %(date)s") %
{"date": sdb.date_string(main_date)})
sdoc.paragraph("")
stab.columns(_("Date"), _("Type"), _("Place"), _("Reference"))
yeartab.columns(_("Date"), _("Type"), _("Place"), _("Reference"))
histab.columns(_("Date"), _("Type"), _("Place"), _("Reference"))
for event in database.iter_events():
date = event.get_date_object()
date.convert_calendar(cal)
if date.get_year() == 0:
continue
if (date.get_year() == main_date.get_year() and
date.get_month() == main_date.get_month() and
date.get_day() == main_date.get_day()):
for (objclass, handle) in database.find_backlink_handles(event.handle):
ref = get_ref(database, objclass, handle)
stab.row(date,
sdb.event_type(event),
sdb.event_place(event), ref)
elif (date.get_month() == main_date.get_month() and
date.get_day() == main_date.get_day() and
date.get_month() != 0):
for (objclass, handle) in database.find_backlink_handles(event.handle):
ref = get_ref(database, objclass, handle)
histab.row(date,
sdb.event_type(event),
sdb.event_place(event), ref)
elif (date.get_year() == main_date.get_year()):
for (objclass, handle) in database.find_backlink_handles(event.handle):
ref = get_ref(database, objclass, handle)
yeartab.row(date,
sdb.event_type(event),
sdb.event_place(event), ref)
document.has_data = False
if stab.get_row_count() > 0:
document.has_data = True
sdoc.paragraph(_("Events on this exact date"))
stab.write(sdoc)
else:
sdoc.paragraph(_("No events on this exact date"))
sdoc.paragraph("")
sdoc.paragraph("")
if histab.get_row_count() > 0:
document.has_data = True
sdoc.paragraph(_("Other events on this month/day in history"))
histab.write(sdoc)
else:
sdoc.paragraph(_("No other events on this month/day in history"))
sdoc.paragraph("")
sdoc.paragraph("")
if yeartab.get_row_count() > 0:
document.has_data = True
sdoc.paragraph(_("Other events in %(year)d") %
{"year":main_date.get_year()})
yeartab.write(sdoc)
else:
sdoc.paragraph(_("No other events in %(year)d") %
{"year":main_date.get_year()})
sdoc.paragraph("")
sdoc.paragraph("")
|
Forage/Gramps
|
gramps/plugins/quickview/onthisday.py
|
Python
|
gpl-2.0
| 5,069
|
[
"Brian"
] |
c854ddb062cad169e7a90c814e5cd4a942265018591b1e4f042e302d8a8464a3
|
from AritLexer import AritLexer
from AritParser import AritParser
# from AritVisitor import AritVisitor
from MyAritVisitor import MyAritVisitor, UnknownIdentifier
from antlr4 import InputStream, CommonTokenStream
import sys
# example of use of visitors to parse arithmetic expressions.
# stops when the first SyntaxError is launched.
def main():
lexer = AritLexer(InputStream(sys.stdin.read()))
stream = CommonTokenStream(lexer)
parser = AritParser(stream)
tree = parser.prog()
print("Parsing : done.")
visitor = MyAritVisitor()
try:
visitor.visit(tree)
except UnknownIdentifier as exc:
print('Unknown identifier: {}'.format(exc.args[0]))
exit(-1)
if __name__ == '__main__':
main()
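# Hedged usage sketch: the script reads its input from stdin, so (assuming the
# Arit grammar accepts simple expressions such as "1+2*3") it can be driven
# from a shell like this:
#
#     echo "1+2*3" | python arit.py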
|
lauregonnord/cap-labs
|
TP03/arith-visitor/arit.py
|
Python
|
gpl-3.0
| 747
|
[
"VisIt"
] |
5e4130dba91705a0a49478d6e9e6635a5ef7a52719a141f3034c8c3ba653881f
|
# -*- encoding: utf-8 -*-
from __future__ import print_function
import json
import time
from builtins import str as text
import pytest
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from model_bakery import baker
from multiseek import logic
from multiseek.logic import (
AND,
CONTAINS,
EQUAL,
MULTISEEK_ORDERING_PREFIX,
MULTISEEK_REPORT_TYPE,
OR,
RANGE_OPS,
)
from multiseek.models import SearchForm
from multiseek.util import make_field
from multiseek.views import LAST_FIELD_REMOVE_MESSAGE
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import alert_is_present
from selenium.webdriver.support.wait import WebDriverWait
from . import multiseek_registry
from .models import Author, Language
from .testutil import select_select2_autocomplete, wait_for_page_load
class wait_for_alert(object):
method_name = "until"
def __init__(self, browser):
self.browser = browser
def __enter__(self):
pass
def __exit__(self, *_):
wait = WebDriverWait(self.browser, 10)
method = getattr(wait, self.method_name)
method(alert_is_present())
class wait_until_no_alert(wait_for_alert):
method_name = "until_not"
FRAME = "frame-0"
FIELD = "field-0"
@pytest.mark.django_db
def test_client_picks_up_database_changes_direct(initial_data, client):
res = client.get("/multiseek/")
assert "english" in res.content.decode(res.charset)
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
res = client.get("/multiseek/")
assert "FOOBAR" in res.content.decode(res.charset)
@pytest.mark.django_db
def test_liveserver_picks_up_database_changes(multiseek_page):
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
assert "FOOBAR" in multiseek_page.browser.html
@pytest.mark.django_db
def test_multiseek(multiseek_page):
field = multiseek_page.get_field(FIELD)
# On init, the first field will be selected
assert field["selected"] == multiseek_page.registry.fields[0].label
@pytest.mark.django_db
def test_liveserver_picks_up_database_changes_direct(
initial_data, browser, live_server
):
with wait_for_page_load(browser):
browser.visit(live_server.url)
assert "english" in browser.html
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
with wait_for_page_load(browser):
browser.reload()
assert "FOOBAR" in browser.html
@pytest.mark.django_db
def test_change_field(multiseek_page):
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(text(multiseek_registry.YearQueryObject.label)).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.RANGE
assert len(field["value"]) == 2
field["type"].find_by_value(
text(multiseek_registry.LanguageQueryObject.label)
).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.VALUE_LIST
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.AUTOCOMPLETE
@pytest.mark.django_db
def test_serialize_form(multiseek_page):
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
frame = multiseek_page.get_frame("frame-0")
frame["add_field"].click()
frame["add_field"].click()
frame["add_field"].click()
frame["add_frame"].click()
frame["add_frame"].click()
for n in range(2, 5):
field = multiseek_page.get_field("field-%i" % n)
field["value_widget"].type("aaapud!")
field = multiseek_page.get_field("field-0")
field["type"].find_by_value(text(multiseek_registry.YearQueryObject.label)).click()
field = multiseek_page.get_field("field-0")
field["value_widget"][0].type("1999")
field["value_widget"][1].type("2000")
field = multiseek_page.get_field("field-1")
field["prev-op"].find_by_value("or").click()
field["type"].find_by_value(
text(multiseek_registry.LanguageQueryObject.label)
).click()
field = multiseek_page.get_field("field-1")
field["value_widget"].find_by_value(text(_(u"english"))).click()
expected = [
None,
{
u"field": u"Year",
u"operator": text(RANGE_OPS[0]),
u"value": u"[1999,2000]",
u"prev_op": None,
},
{
u"field": u"Language",
u"operator": text(EQUAL),
u"value": u"english",
u"prev_op": OR,
},
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": AND,
},
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": AND,
},
[
AND,
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": None,
},
],
[
AND,
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"",
u"prev_op": None,
},
],
]
serialized = multiseek_page.serialize()
assert serialized == expected
for n in range(1, 6):
field = multiseek_page.get_field("field-%i" % n)
field["close-button"].click()
time.sleep(2)
expected = [
None,
{
u"field": u"Year",
u"operator": u"in range",
u"value": u"[1999,2000]",
u"prev_op": None,
},
]
serialized = multiseek_page.serialize()
assert serialized == expected
@pytest.mark.django_db
def test_remove_last_field(multiseek_page):
assert Language.objects.count()
field = multiseek_page.get_field("field-0")
field["close-button"].click()
alert = multiseek_page.browser.get_alert()
assert alert.text == LAST_FIELD_REMOVE_MESSAGE
alert.accept()
@pytest.mark.django_db
def test_autocomplete_field(multiseek_page):
assert Language.objects.count()
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
element = multiseek_page.browser.find_by_css(".select2-container")
select_select2_autocomplete(multiseek_page.browser, element, "Smith")
got = multiseek_page.serialize()
expect = [
None,
make_field(
multiseek_registry.AuthorQueryObject,
text(EQUAL),
str(Author.objects.filter(last_name="Smith")[0].pk),
prev_op=None,
),
]
assert got == expect
@pytest.mark.django_db
def test_autocomplete_field_bug(multiseek_page):
"""We fill autocomplete field with NOTHING, then we submit the form,
then we reload the homepage, and by the time of writing, we see
HTTP error 500, which is not what we need..."""
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
time.sleep(1)
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
assert "Server Error (500)" not in multiseek_page.browser.html
@pytest.mark.django_db
def test_autocomplete_field_bug_2(multiseek_page):
"""We fill autocomplete field with NOTHING, then we submit the form,
then we reload the homepage, click the "add field button" and by the
time of writing, we get a javascript error."""
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
time.sleep(1)
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
multiseek_page.browser.find_by_id("add_field").click()
time.sleep(1)
selects = [
tag
for tag in multiseek_page.browser.find_by_tag("select")
if tag["id"] == "type"
]
assert len(selects[0].find_by_tag("option")) != 0
assert len(selects[1].find_by_tag("option")) != 0
@pytest.mark.django_db
def test_set_join(multiseek_page):
multiseek_page.browser.find_by_id("add_field").click()
multiseek_page.browser.execute_script(
"$('#field-1').multiseekField('prevOperation').val('or')"
)
ret = multiseek_page.browser.evaluate_script(
"$('#field-1').multiseekField('prevOperation').val()"
)
assert ret == "or"
multiseek_page.add_field(
FRAME,
text(multiseek_page.registry.fields[0].label),
text(multiseek_page.registry.fields[0].ops[0]),
"",
)
multiseek_page.browser.execute_script(
"$('#field-2').multiseekField('prevOperation').val('or')"
)
ret = multiseek_page.browser.evaluate_script(
"$('#field-2').multiseekField('prevOperation').val()"
)
assert ret == "or"
@pytest.mark.django_db
def test_set_frame_join(multiseek_page):
multiseek_page.browser.execute_script(
"""
$("#frame-0").multiseekFrame('addFrame');
$("#frame-0").multiseekFrame('addFrame', 'or');
"""
)
ret = multiseek_page.browser.evaluate_script(
"$('#frame-2').multiseekFrame('getPrevOperationValue')"
)
assert ret == "or"
@pytest.mark.django_db
def test_add_field_value_list(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.LanguageQueryObject.label,
multiseek_registry.LanguageQueryObject.ops[1],
text(_(u"polish")),
)
field = multiseek_page.get_field("field-1")
assert field["type"].value == text(multiseek_registry.LanguageQueryObject.label)
assert field["op"].value == text(multiseek_registry.LanguageQueryObject.ops[1])
assert field["value"] == text(_(u"polish"))
@pytest.mark.django_db
def test_add_field_autocomplete(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.AuthorQueryObject.label,
multiseek_registry.AuthorQueryObject.ops[1],
'[1,"John Smith"]',
)
value = multiseek_page.get_field_value("field-1")
assert value == "1"
@pytest.mark.django_db
def test_add_field_string(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.TitleQueryObject.label,
multiseek_registry.TitleQueryObject.ops[0],
"aaapud!",
)
field = multiseek_page.get_field_value("field-1")
assert field == "aaapud!"
@pytest.mark.django_db
def test_add_field_range(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.YearQueryObject.label,
multiseek_registry.YearQueryObject.ops[0],
"[1000, 2000]",
)
field = multiseek_page.get_field_value("field-1")
assert field == "[1000,2000]"
@pytest.mark.django_db
def test_refresh_bug(multiseek_page):
# There was a bug where, after submitting the form with the "OR" operation
# and then refreshing the page, the operation changed to "AND"
frame = multiseek_page.get_frame("frame-0")
frame["add_field"].click()
field = multiseek_page.get_field("field-1")
field["prev-op"].find_by_value(text(_("or"))).click()
assert field["prev-op"].value == text(_("or"))
button = multiseek_page.browser.find_by_id("sendQueryButton")
button.click()
time.sleep(0.5)
multiseek_page.browser.reload()
field = multiseek_page.get_field("field-1")
assert field["prev-op"].value == text(_("or"))
@pytest.mark.django_db
def test_frame_bug(multiseek_page):
multiseek_page.browser.find_by_id("add_frame").click()
multiseek_page.browser.find_by_id("close-button").click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
with multiseek_page.browser.get_iframe("if") as iframe:
assert "Server Error (500)" not in iframe.html
@pytest.mark.django_db
def test_date_field(multiseek_page):
field = multiseek_page.get_field("field-0")
field["type"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.label)
).click()
field["op"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.ops[6])
).click()
expected = [
None,
{
u"field": u"Last updated on",
u"operator": u"in range",
u"value": u'["",""]',
u"prev_op": None,
},
]
assert multiseek_page.serialize() == expected
field["op"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.ops[3])
).click()
expected = [
None,
{
u"field": u"Last updated on",
u"operator": u"greater or equal to(female gender)",
u"value": u'[""]',
u"prev_op": None,
},
]
assert expected == multiseek_page.serialize()
@pytest.mark.django_db
def test_removed_records(multiseek_page, live_server, initial_data):
"""Try to remove a record by hand and check if that fact is properly
recorded."""
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" in multiseek_page.browser.html
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" not in multiseek_page.browser.html
assert "1 record(s) has been removed manually" in multiseek_page.browser.html
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" not in multiseek_page.browser.html
assert "1 record(s) has been removed manually" in multiseek_page.browser.html
@pytest.mark.django_db
def test_form_save_anon_initial(multiseek_page):
# Without SearchForm objects, the formsSelector is invisible
elem = multiseek_page.browser.find_by_id("formsSelector")
assert not elem.visible
@pytest.mark.django_db
def test_form_save_anon_initial_with_data(multiseek_page):
baker.make(SearchForm, public=True)
multiseek_page.browser.reload()
elem = multiseek_page.browser.find_by_id("formsSelector")
assert elem.visible
@pytest.mark.django_db
def test_form_save_anon_form_save_anonymous(multiseek_page):
# Anonymous users cannot save forms:
assert len(multiseek_page.browser.find_by_id("saveFormButton")) == 0
@pytest.mark.django_db
def test_form_save_anon_bug(multiseek_page):
multiseek_page.browser.find_by_id("add_frame").click()
multiseek_page.browser.find_by_id("add_field").click()
field1 = multiseek_page.get_field("field-1")
field1["close-button"].click()
time.sleep(1)
selects = multiseek_page.browser.find_by_tag("select")
prevops = [x for x in selects if x["id"] == "prev-op"]
assert len(prevops) == 1
@pytest.mark.django_db
def test_public_report_types_secret_report_invisible(multiseek_page):
elem = multiseek_page.browser.find_by_name("_ms_report_type").find_by_tag("option")
assert len(elem) == 2
@pytest.mark.django_db
def test_logged_in_secret_report_visible(
multiseek_admin_page, admin_user, initial_data
):
elem = multiseek_admin_page.browser.find_by_name("_ms_report_type")
elem = elem.first.find_by_tag("option")
assert len(elem) == 3
@pytest.mark.django_db
def test_save_form_logged_in(multiseek_admin_page, initial_data):
assert multiseek_admin_page.browser.find_by_id("saveFormButton").visible
@pytest.mark.django_db
def test_save_form_server_error(multiseek_admin_page, initial_data):
NAME = "testowy formularz"
multiseek_admin_page.browser.execute_script(
"multiseek.SAVE_FORM_URL='/unexistent';"
)
browser = multiseek_admin_page.browser
# Save the form
multiseek_admin_page.save_form_as(NAME)
# ... prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.browser.get_alert().accept()
time.sleep(1)
# ... a moment later, a notification that there was an ERROR!
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.browser.get_alert().accept()
WebDriverWait(browser, 10).until_not(alert_is_present())
# ... and the selector does NOT appear:
assert not multiseek_admin_page.browser.find_by_id("formsSelector").visible
# ... and the database is EMPTY as well:
assert SearchForm.objects.all().count() == 0
@pytest.mark.django_db
def test_save_form_save(multiseek_admin_page, initial_data):
browser = multiseek_admin_page.browser
assert SearchForm.objects.all().count() == 0
# multiseek_admin_page.browser.reload()
with wait_for_alert(browser):
multiseek_admin_page.click_save_button()
with wait_until_no_alert(browser):
multiseek_admin_page.dismiss_alert()
# Cancelling should not display the next form
NAME = "testowy formularz"
# Save the form
multiseek_admin_page.save_form_as(NAME)
# ... prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a notification that it was saved
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and the name appears in the selector
assert multiseek_admin_page.count_elements_in_form_selector(NAME) == 1
# ... and in the database:
assert SearchForm.objects.all().count() == 1
# Save the form under the SAME NAME
multiseek_admin_page.save_form_as(NAME)
# ... prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a prompt that one already exists in the database, overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a notification that it was saved:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and the name appears in the selector
assert multiseek_admin_page.count_elements_in_form_selector(NAME) == 1
# ... and there is still only one in the database
assert SearchForm.objects.all().count() == 1
# Save the form under the SAME NAME, but this time do NOT overwrite
multiseek_admin_page.save_form_as(NAME)
# ... prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a prompt that one already exists in the database, overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a notification that it was SAVED
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and there is still only one in the database
assert SearchForm.objects.all().count() == 1
# Check that it is public
assert SearchForm.objects.all()[0].public
# Overwrite the form as non-public
multiseek_admin_page.save_form_as(NAME)
# ... prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.dismiss_alert()
# ... a moment later, a prompt that one already exists in the database, overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... a moment later, a notification that it was saved:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and it is now NON-public:
assert not SearchForm.objects.all()[0].public
@pytest.mark.django_db
def test_load_form(multiseek_admin_page, initial_data):
fld = make_field(
multiseek_admin_page.registry.fields[2],
multiseek_admin_page.registry.fields[2].ops[1],
json.dumps([2000, 2010]),
)
SearchForm.objects.create(
name="lol",
owner=User.objects.create(username="foo", password="bar"),
public=True,
data=json.dumps({"form_data": [None, fld]}),
)
multiseek_admin_page.load_form_by_name("lol")
field = multiseek_admin_page.extract_field_data(
multiseek_admin_page.browser.find_by_id("field-0")
)
assert field["selected"] == text(multiseek_admin_page.registry.fields[2].label)
assert field["value"][0] == 2000
assert field["value"][1] == 2010
# Test that after CANCELLING the select returns to its original value
elem = multiseek_admin_page.browser.find_by_id("formsSelector").first
elem.find_by_text("lol").click()
multiseek_admin_page.dismiss_alert()
elem = multiseek_admin_page.browser.find_by_id("formsSelector").find_by_tag(
"option"
)
assert elem[0].selected
@pytest.mark.django_db
def test_bug_2(multiseek_admin_page, initial_data):
f = multiseek_admin_page.registry.fields[0]
v = multiseek_admin_page.registry.fields[0].ops[0]
value = "foo"
field = make_field(f, v, value, OR)
form = [None, field, [OR, field, field, field], [OR, field, field, field]]
data = json.dumps({"form_data": form})
user = User.objects.create(username="foo", password="bar")
SearchForm.objects.create(name="bug-2", owner=user, public=True, data=data)
multiseek_admin_page.load_form_by_name("bug-2")
elements = multiseek_admin_page.browser.find_by_css("[name=prev-op]:visible")
for elem in elements:
if elem.css("visibility") != "hidden":
assert elem.value == logic.OR
@pytest.mark.django_db
def test_save_ordering_direction(multiseek_admin_page, initial_data):
elem = "input[name=%s1_dir]" % MULTISEEK_ORDERING_PREFIX
browser = multiseek_admin_page.browser
browser.find_by_css(elem).type(Keys.SPACE)
multiseek_admin_page.save_form_as("foobar")
# Should the dialog be public?
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
# Form saved successfully
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
multiseek_admin_page.load_form_by_name("foobar")
assert len(multiseek_admin_page.browser.find_by_css("%s:checked" % elem)) == 1
@pytest.mark.django_db
def test_save_ordering_box(multiseek_admin_page, initial_data):
elem = "select[name=%s0]" % MULTISEEK_ORDERING_PREFIX
browser = multiseek_admin_page.browser
select = browser.find_by_css(elem)
option = select.find_by_css('option[value="2"]')
assert not option.selected
option.click()
multiseek_admin_page.save_form_as("foobar")
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
multiseek_admin_page.load_form_by_name("foobar")
select = multiseek_admin_page.browser.find_by_css(elem)
option = select.find_by_css('option[value="2"]')
assert option.selected
@pytest.mark.django_db
def test_save_report_type(multiseek_admin_page, initial_data):
elem = "select[name=%s]" % MULTISEEK_REPORT_TYPE
select = multiseek_admin_page.browser.find_by_css(elem).first
option = select.find_by_css('option[value="1"]')
assert not option.selected
option.click()
multiseek_admin_page.save_form_as("foobar")
browser = multiseek_admin_page.browser
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
time.sleep(1)
multiseek_admin_page.load_form_by_name("foobar")
select = multiseek_admin_page.browser.find_by_css(elem).first
option = select.find_by_css('option[value="1"]')
assert option.selected
|
mpasternak/django-multiseek
|
test_project/test_app/tests.py
|
Python
|
mit
| 25,528
|
[
"VisIt"
] |
8473bcbf061ec9ecd8b71f78ab433f43e8567916984dc92f5eaa601b44ddf454
|
"""The config flow tests for the forked_daapd media player platform."""
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.forked_daapd.const import (
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DOMAIN,
)
from homeassistant.config_entries import (
CONN_CLASS_LOCAL_PUSH,
SOURCE_USER,
SOURCE_ZEROCONF,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
SAMPLE_CONFIG = {
"websocket_port": 3688,
"version": "25.0",
"buildoptions": [
"ffmpeg",
"iTunes XML",
"Spotify",
"LastFM",
"MPD",
"Device verification",
"Websockets",
"ALSA",
],
}
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create hass config_entry fixture."""
data = {
CONF_HOST: "192.168.1.1",
CONF_PORT: "2345",
CONF_PASSWORD: "",
}
return MockConfigEntry(
version=1,
domain=DOMAIN,
title="",
data=data,
options={},
system_options={},
source=SOURCE_USER,
connection_class=CONN_CLASS_LOCAL_PUSH,
entry_id=1,
)
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_config_flow(hass, config_entry):
"""Test that the user step works."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection",
new=AsyncMock(),
) as mock_test_connection, patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
mock_test_connection.return_value = ["ok", "My Music on myhost"]
config_data = config_entry.data
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_data
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "My Music on myhost"
assert result["data"][CONF_HOST] == config_data[CONF_HOST]
assert result["data"][CONF_PORT] == config_data[CONF_PORT]
assert result["data"][CONF_PASSWORD] == config_data[CONF_PASSWORD]
# Also test that creating a new entry with the same host aborts
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=config_entry.data,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_updates_title(hass, config_entry):
"""Test that zeroconf updates title and aborts with same host."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "different host"}).add_to_hass(hass)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {"mtd-version": "27.0", "Machine Name": "zeroconf_test"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert config_entry.title == "zeroconf_test"
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
async def test_config_flow_no_websocket(hass, config_entry):
"""Test config flow setup without websocket enabled on server."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection",
new=AsyncMock(),
) as mock_test_connection:
# test invalid config data
mock_test_connection.return_value = ["websocket_not_enabled"]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_config_flow_zeroconf_invalid(hass):
"""Test that an invalid zeroconf entry doesn't work."""
# test with no discovery properties
discovery_info = {"host": "127.0.0.1", "port": 23}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with forked-daapd version < 27
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "26.3", "Machine Name": "forked-daapd"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with verbose mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "0.2.4.1", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with svn mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "svn-1676", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
async def test_config_flow_zeroconf_valid(hass):
"""Test that a valid zeroconf entry works."""
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {
"mtd-version": "27.0",
"Machine Name": "zeroconf_test",
"Machine ID": "5E55EEFF",
},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_options_flow(hass, config_entry):
"""Test config flow options."""
with patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
config_entry.add_to_hass(hass)
await config_entry.async_setup(hass)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TTS_PAUSE_TIME: 0.05,
CONF_TTS_VOLUME: 0.8,
CONF_LIBRESPOT_JAVA_PORT: 0,
CONF_MAX_PLAYLISTS: 8,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
tchellomello/home-assistant
|
tests/components/forked_daapd/test_config_flow.py
|
Python
|
apache-2.0
| 8,044
|
[
"Firefly"
] |
5d5cd70ee3eb17faa2ded6ca0e8456891c6f9807020c643ee5bc6384c6154f41
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ase import
|
jochym/ase-qe
|
ase-qe/__init__.py
|
Python
|
gpl-3.0
| 62
|
[
"ASE"
] |
84b4e4b3c3103dcca0c70097fa1c1fedcb6db44ccec132010b0816bc29700da7
|
import os
import re
import uuid
import requests
import json
import logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
from django.core.management.base import BaseCommand
from account.models import Team
from django.conf import settings
from django.utils import timezone
RANCHER_ACCESS_KEY = os.environ.get('RANCHER_ACCESS_KEY', None)
RANCHER_SECRET_KEY = os.environ.get('RANCHER_SECRET_KEY', None)
METADATA_FLAG = os.environ.get('METADATA_FLAG', "none_yet")
def get_env(team):
return {
"PROXY_PREFIX": "/galaxy-%s" % team.name,
"GALAXY_CONFIG_MASTER_API_KEY": uuid.uuid4().hex,
"GALAXY_DEFAULT_ADMIN_KEY": 'key-' + str(team.admin_password),
"GALAXY_DEFAULT_ADMIN_PASSWORD": str(team.admin_password),
"GALAXY_CONFIG_OVERRIDE_STATSD_PREFIX": "galaxy-all",
"GALAXY_CONFIG_OVERRIDE_STATSD_HOST": "127.0.0.1",
"GALAXY_CONFIG_OVERRIDE_STATSD_PORT": 8125,
"GALAXY_CONFIG_REQUIRE_LOGIN": "true",
"TEAM_ID" : str(team.id),
"TEAM_NAME" : team.name,
"TEAM_PASSWORD": team.password,
}
def safe_str(unsafe):
unsafe = unsafe.replace(' ', '_')
unsafe = re.sub('[^A-Za-z0-9_-]', '', unsafe)
if len(unsafe) == 0:
raise Exception("Empty result")
return unsafe
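# Worked examples of the sanitisation above (input values are illustrative):
#
#     safe_str("My Team")   -> "My_Team"
#     safe_str("Team #1")   -> "Team_1"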
def get_current_state():
data = requests.get(
'https://rancher.galaxians.org/v2-beta/projects/1a5/stacks/1st19/services',
auth=(RANCHER_ACCESS_KEY, RANCHER_SECRET_KEY)
).json()
state = {}
for container in data['data']:
labels = container['launchConfig']['labels']
if 'org.galaxians.ctf' not in labels:
continue
state[container['launchConfig']['labels']['org.galaxians.ctf.team.name']] = {
'id': container['id'],
}
return state
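# The returned mapping is keyed by team name; a hedged sketch of its shape
# (ids are illustrative):
#
#     {"team-alpha": {"id": "1s123"}, "team-beta": {"id": "1s456"}}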
def update_load_balancer(routes):
port_rules = []
linked_services = {}
priority = 1
for (team_name, data) in routes.items():
port_rules.append({
"path" : "/galaxy-%s" % team_name,
"priority" : priority,
"protocol" : "http",
"serviceId" : routes[team_name]['id'],
"sourcePort" : 81,
"targetPort" : 80,
"type" : "portRule",
})
linked_services['galaxy-%s' % team_name] = routes[team_name]['id']
priority += 1
port_rules.append({
"path": "/",
"priority" : priority,
"protocol": "http",
"serviceId": "1s72",
"sourcePort": 81,
"targetPort": 80,
"type": "portRule",
})
data = {
"externalId" : None,
"removed" : None,
"uuid" : "a7336947-8198-4f86-bf1f-c6525354c37",
"assignServiceIpAddress" : False,
"name" : "lb",
"state" : "active",
"type" : "loadBalancerService",
"scalePolicy" : None,
"launchConfig" : {
"cpuPercent" : None,
"ipcMode" : None,
"vcpu" : 1,
"memoryMb" : None,
"stdinOpen" : False,
"instanceTriggeredStop" : "stop",
"networkMode" : "managed",
"ioMaximumBandwidth" : None,
"kind" : "container",
"pidMode" : None,
"count" : None,
"cpuPeriod" : None,
"healthState" : None,
"pidsLimit" : None,
"imageUuid" : "docker:rancher/lb-service-haproxy:v0.7.1",
"userdata" : None,
"diskQuota" : None,
"usernsMode" : None,
"memory" : None,
"readOnly" : False,
"ip" : None,
"ioMaximumIOps" : None,
"startOnCreate" : True,
"ip6" : None,
"shmSize" : None,
"hostname" : None,
"createIndex" : None,
"ports" : [
"81:81/tcp"
],
"memorySwappiness" : None,
"type" : "launchConfig",
"labels" : {
"io.rancher.container.agent.role" : "environmentAdmin",
"io.rancher.container.create_agent" : "True"
},
"uts" : None,
"healthCheck" : {
"strategy" : None,
"requestLine" : None,
"responseTimeout" : 2000,
"recreateOnQuorumStrategyConfig" : None,
"type" : "instanceHealthCheck",
"initializingTimeout" : 60000,
"interval" : 2000,
"healthyThreshold" : 2,
"port" : 42,
"reinitializingTimeout" : 60000,
"unhealthyThreshold" : 3,
"name" : None
},
"workingDir" : None,
"kernelMemory" : None,
"cpuShares" : None,
"externalId" : None,
"memorySwap" : None,
"removed" : None,
"uuid" : None,
"isolation" : None,
"memoryReservation" : None,
"stopSignal" : None,
"firstRunning" : None,
"cpuSetMems" : None,
"milliCpuReservation" : None,
"cpuSet" : None,
"expose" : [],
"cpuCount" : None,
"privileged" : False,
"deploymentUnitUuid" : None,
"networkLaunchConfig" : None,
"tty" : False,
"description" : None,
"healthRetries" : None,
"domainName" : None,
"oomScoreAdj" : None,
"requestedIpAddress" : None,
"cpuQuota" : None,
"volumeDriver" : None,
"startCount" : None,
"healthInterval" : None,
"publishAllPorts" : False,
"version" : "0",
"system" : False,
"blkioWeight" : None,
"cgroupParent" : None,
"healthTimeout" : None,
"created" : None,
"user" : None
},
"created" : "2017-05-06T22:02:09Z",
"secondaryLaunchConfigs" : [],
"baseType" : "service",
"vip" : None,
"startOnCreate" : True,
"linkedServices" : linked_services,
"transitioningProgress" : None,
"publicEndpoints" : [
{
"instanceId" : "1i14994",
"type" : "publicEndpoint",
"serviceId" : "1s124",
"ipAddress" : "207.154.248.244",
"port" : 81,
"hostId" : "1h8"
}
],
"accountId" : "1a5",
"selectorLink" : None,
"system" : False,
"retainIp" : None,
"lbConfig" : {
"certificateIds" : [],
"portRules" : port_rules,
"defaultCertificateId" : None,
"type" : "lbConfig",
"stickinessPolicy" : None,
"config" : None
},
"scale" : 1,
"healthState" : "healthy",
"transitioningMessage" : None,
"transitioning" : "no",
"metadata" : None,
"description" : None,
"kind" : "loadBalancerService",
"fqdn" : "lb.galaxy.galaxians.org.",
"createdTS" : 1498650695000,
"stackId" : "1st19",
"upgrade" : None,
"currentScale" : 1,
"id" : "1s124"
}
resp = requests.put(
'https://rancher.galaxians.org/v2-beta/projects/1a5/loadbalancerservices/1s124',
auth=(RANCHER_ACCESS_KEY, RANCHER_SECRET_KEY),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
},
data=json.dumps(data),
).json()
return resp
def launch_container(team):
data = {
"removed" : None,
"selectorLink" : None,
"description" : None,
"created" : None,
"scale" : 1,
"healthState" : None,
"stackId" : "1st19",
"uuid" : None,
"name" : "galaxy-%s" % team.name,
"metadata": {
"super_secret_password": METADATA_FLAG,
},
"type" : "service",
"secondaryLaunchConfigs" : [],
"launchConfig" : {
"logConfig" : {
"config" : {},
"driver" : ""
},
"requestedIpAddress" : None,
"userdata" : None,
"isolation" : None,
"cgroupParent" : None,
"dns" : [],
"healthCheck" : {
"recreateOnQuorumStrategyConfig" : None,
"responseTimeout" : 2000,
"interval" : 2000,
"unhealthyThreshold" : 3,
"strategy" : "none",
"port" : 80,
"healthyThreshold" : 2,
"reinitializingTimeout" : 60000,
"initializingTimeout" : 60000,
"name" : None,
"type" : "instanceHealthCheck",
"requestLine" : "GET \"/\" \"HTTP/1.0\""
},
"hostname" : None,
"capAdd" : [],
"healthTimeout" : None,
"devices" : [],
"memory" : 1258291200,
"tty" : True,
"stdinOpen" : True,
"healthState" : None,
"diskQuota" : None,
"description" : None,
"labels" : {
"org.galaxians.ctf" : "gccctf2017",
"org.galaxians.ctf.team.name" : team.name,
"org.galaxians.ctf.team.id" : str(team.id),
"io.rancher.scheduler.affinity:host_label" : "role=compute",
"io.rancher.container.pull_image" : "always",
},
"cpuQuota" : None,
"count" : None,
"dnsSearch" : [],
"dataVolumes" : [],
"createIndex" : None,
"cpuPercent" : None,
"cpuPeriod" : None,
"externalId" : None,
"pidMode" : None,
"restartPolicy" : {
"name" : "always"
},
"workingDir" : None,
"ports" : [],
"privileged" : False,
"kernelMemory" : None,
"cpuSet" : None,
"dataVolumesFromLaunchConfigs" : [],
"memorySwappiness" : None,
"instanceTriggeredStop" : "stop",
"firstRunning" : None,
"uts" : None,
"ioMaximumIOps" : None,
"usernsMode" : None,
"capDrop" : [],
"startOnCreate" : True,
"secrets" : [],
"blkioWeight" : None,
"pidsLimit" : None,
"ip" : None,
"networkMode" : "managed",
"memoryReservation" : None,
"kind" : "container",
"vcpu" : 1,
"publishAllPorts" : False,
"dataVolumesFrom" : [],
"volumeDriver" : None,
"uuid" : None,
"deploymentUnitUuid" : None,
"cpuShares" : None,
"ioMaximumBandwidth" : None,
"healthInterval" : None,
"startCount" : None,
"memoryMb" : None,
"stopSignal" : None,
"readOnly" : False,
"memorySwap" : None,
"ipcMode" : None,
"ip6" : None,
"shmSize" : None,
"cpuSetMems" : None,
"environment" : get_env(team),
"type" : "launchConfig",
"user" : None,
"networkLaunchConfig" : None,
"cpuCount" : None,
"created" : None,
"milliCpuReservation" : None,
"removed" : None,
"domainName" : None,
"oomScoreAdj" : None,
"healthRetries" : None,
"imageUuid" : "docker:quay.io/erasche/gccctf2017:latest"
},
"assignServiceIpAddress" : False,
"kind" : None,
"externalId" : None,
"vip" : None,
"selectorContainer" : None,
"fqdn" : None,
"createIndex" : None,
"startOnCreate" : True
}
resp = requests.post(
'https://rancher.galaxians.org/v2-beta/projects/1a5/service',
auth=(RANCHER_ACCESS_KEY, RANCHER_SECRET_KEY),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
},
data=json.dumps(data),
).json()
return resp
class Command(BaseCommand):
help = 'gx-manager'
def handle(self, *args, **options):
if RANCHER_ACCESS_KEY is None:
raise Exception("Please set RANCHER_ACCESS_KEY")
if RANCHER_SECRET_KEY is None:
raise Exception("Please set RANCHER_SECRET_KEY")
if not(settings.COMPETITION_STARTS < timezone.now() < settings.COMPETITION_ENDS):
logging.info("Competition has not stated yet")
import sys
sys.exit(0)
current_state = get_current_state()
stateChanged = False
for team in Team.objects.all():
if team.name in current_state:
logging.debug("Team [%s] container available, continuing", team.name)
else:
stateChanged = True
# Then launch an image
logging.info("Team [%s] container not available, launching", team.name)
container = launch_container(team)
logging.info("Launched %s", container['id'])
# Refetch current state and update LB
if stateChanged:
update_load_balancer(get_current_state())
|
galaxy-ctf/milky-way
|
milkyway/management/commands/rancher_sync.py
|
Python
|
agpl-3.0
| 13,486
|
[
"Galaxy"
] |
eac0e1df2de833f29460c3b438bf93193cdb3a263e811475b2122b276180d3bb
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
# MOLDEN format:
# http://www.cmbi.ru.nl/molden/molden_format.html
import sys
import re
import numpy
import pyscf
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf import __config__
IGNORE_H = getattr(__config__, 'molden_ignore_h', True)
def orbital_coeff(mol, fout, mo_coeff, spin='Alpha', symm=None, ene=None,
occ=None, ignore_h=IGNORE_H):
from pyscf.symm import label_orb_symm
if mol.cart:
# pyscf Cartesian GTOs are not normalized. This may not be consistent
# with the requirements of molden format. Normalize Cartesian GTOs here
norm = mol.intor('int1e_ovlp').diagonal() ** .5
mo_coeff = numpy.einsum('i,ij->ij', norm, mo_coeff)
if ignore_h:
mol, mo_coeff = remove_high_l(mol, mo_coeff)
aoidx = order_ao_index(mol)
nmo = mo_coeff.shape[1]
if symm is None:
symm = ['A']*nmo
if mol.symmetry:
try:
symm = label_orb_symm(mol, mol.irrep_name, mol.symm_orb,
mo_coeff, tol=1e-5)
except ValueError as e:
logger.warn(mol, str(e))
if ene is None:
ene = numpy.arange(nmo)
assert(spin == 'Alpha' or spin == 'Beta')
if occ is None:
occ = numpy.zeros(nmo)
neleca, nelecb = mol.nelec
if spin == 'Alpha':
occ[:neleca] = 1
else:
occ[:nelecb] = 1
fout.write('[MO]\n')
for imo in range(nmo):
fout.write(' Sym= %s\n' % symm[imo])
fout.write(' Ene= %15.10g\n' % ene[imo])
fout.write(' Spin= %s\n' % spin)
fout.write(' Occup= %10.5f\n' % occ[imo])
for i,j in enumerate(aoidx):
fout.write(' %3d %18.14g\n' % (i+1, mo_coeff[j,imo]))
def from_mo(mol, filename, mo_coeff, spin='Alpha', symm=None, ene=None,
occ=None, ignore_h=IGNORE_H):
'''Dump the given MOs in Molden format'''
with open(filename, 'w') as f:
header(mol, f, ignore_h)
orbital_coeff(mol, f, mo_coeff, spin, symm, ene, occ, ignore_h)
def from_scf(mf, filename, ignore_h=IGNORE_H):
'''Dump the given SCF object in Molden format'''
dump_scf(mf, filename, ignore_h)
def dump_scf(mf, filename, ignore_h=IGNORE_H):
import pyscf.scf
mol = mf.mol
mo_coeff = mf.mo_coeff
with open(filename, 'w') as f:
header(mol, f, ignore_h)
if isinstance(mf, pyscf.scf.uhf.UHF) or 'UHF' == mf.__class__.__name__:
orbital_coeff(mol, f, mo_coeff[0], spin='Alpha',
ene=mf.mo_energy[0], occ=mf.mo_occ[0],
ignore_h=ignore_h)
orbital_coeff(mol, f, mo_coeff[1], spin='Beta',
ene=mf.mo_energy[1], occ=mf.mo_occ[1],
ignore_h=ignore_h)
else:
orbital_coeff(mf.mol, f, mf.mo_coeff,
ene=mf.mo_energy, occ=mf.mo_occ, ignore_h=ignore_h)
def from_mcscf(mc, filename, ignore_h=IGNORE_H, cas_natorb=False):
mol = mc.mol
dm1 = mc.make_rdm1()
if cas_natorb:
mo_coeff, ci, mo_energy = mc.canonicalize(sort=True, cas_natorb=cas_natorb)
else:
mo_coeff, ci, mo_energy = mc.mo_coeff, mc.ci, mc.mo_energy
mo_inv = numpy.dot(mc._scf.get_ovlp(), mo_coeff)
occ = numpy.einsum('pi,pq,qi->i', mo_inv, dm1, mo_inv)
with open(filename, 'w') as f:
header(mol, f, ignore_h)
orbital_coeff(mol, f, mo_coeff, ene=mo_energy, occ=occ, ignore_h=ignore_h)
def from_chkfile(filename, chkfile, key='scf/mo_coeff', ignore_h=IGNORE_H):
import pyscf.scf
with open(filename, 'w') as f:
if key == 'scf/mo_coeff':
mol, mf = pyscf.scf.chkfile.load_scf(chkfile)
header(mol, f, ignore_h)
ene = mf['mo_energy']
occ = mf['mo_occ']
mo = mf['mo_coeff']
else:
mol = pyscf.scf.chkfile.load_mol(chkfile)
header(mol, f, ignore_h)
dat = pyscf.scf.chkfile.load(chkfile, key.split('/')[0])
if 'mo_energy' in dat:
ene = dat['mo_energy']
else:
ene = None
occ = dat['mo_occ']
mo = dat['mo_coeff']
if isinstance(ene, str) and ene == 'None':
ene = None
        if isinstance(occ, str) and occ == 'None':
occ = None
if occ.ndim == 2:
orbital_coeff(mol, f, mo[0], spin='Alpha', ene=ene[0], occ=occ[0],
ignore_h=ignore_h)
orbital_coeff(mol, f, mo[1], spin='Beta', ene=ene[1], occ=occ[1],
ignore_h=ignore_h)
else:
orbital_coeff(mol, f, mo, ene=ene, occ=occ, ignore_h=ignore_h)
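# Editor's sketch (not part of the original module): typical use of the dump
# helper above, assuming an SCF checkpoint file 'scf.chk' already exists on
# disk. Other checkpoint groups can be selected through the `key` argument.
#
#     from pyscf.tools import molden
#     molden.from_chkfile('scf.molden', 'scf.chk')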
_SEC_RE = re.compile(r'\[[^]]+\]')
def _read_one_section(molden_fp):
sec = [None]
while True:
line = molden_fp.readline()
if not line:
break
line = line.strip()
if line == '' or line[0] == '#': # comment or blank line
continue
mo = _SEC_RE.match(line)
if mo:
if sec[0] is None:
sec[0] = line
else:
# Next section? rewind the fp pointer
molden_fp.seek(last_pos)
break
else:
sec.append(line)
last_pos = molden_fp.tell()
return sec
def _parse_natoms(lines, envs):
envs['natm'] = natm = int(lines[1])
return natm
def _parse_atoms(lines, envs):
if 'ANG' in lines[0].upper():
envs['unit'] = 1
unit = envs['unit']
envs['atoms'] = atoms = []
for line in lines[1:]:
dat = line.split()
symb, atmid, chg = dat[:3]
coord = numpy.array([float(x) for x in dat[3:]])*unit
atoms.append((gto.mole._std_symbol(symb)+atmid, coord))
if envs['natm'] is not None and envs['natm'] != len(atoms):
        sys.stderr.write('Number of atoms in section ATOMS does not match N_ATOMS\n')
return atoms
def _parse_charge(lines, envs):
mulliken_charges = [float(_d2e(x)) for x in lines[1:]]
return mulliken_charges
def _parse_gto(lines, envs):
mol = envs['mol']
atoms = envs['atoms']
basis = {}
lines_iter = iter(lines)
next(lines_iter) # skip section header
    # lines_iter is shared with read_one_bas below so that the primitive lines
    # of each shell are consumed in order.
def read_one_bas(lsym, nb, fac=1):
fac = float(fac)
bas = [lib.param.ANGULARMAP[lsym.lower()],]
for i in range(int(nb)):
dat = _d2e(next(lines_iter)).split()
bas.append((float(dat[0]), float(dat[1])*fac))
return bas
    # * Be careful with the atom sequence in the [GTO] section; it does not
    #   necessarily correspond to the atom sequence in the [Atoms] section.
atom_seq = []
for line in lines_iter:
dat = line.split()
if dat[0].isdigit():
atom_seq.append(int(dat[0])-1)
symb = atoms[int(dat[0])-1][0]
basis[symb] = []
elif dat[0].upper() in 'SPDFGHIJ':
basis[symb].append(read_one_bas(*dat))
mol.basis = envs['basis'] = basis
mol.atom = [atoms[i] for i in atom_seq]
return mol
def _parse_mo(lines, envs):
mol = envs['mol']
atoms = envs['atoms']
if not mol._built:
try:
mol.build(0, 0)
except RuntimeError:
mol.build(0, 0, spin=1)
irrep_labels = []
mo_energy = []
spins = []
mo_occ = []
mo_coeff = []
norb_alpha = -1
for line in lines[1:]:
line = line.upper()
if 'SYM' in line:
irrep_labels.append(line.split('=')[1].strip())
orb = []
mo_coeff.append(orb)
elif 'ENE' in line:
mo_energy.append(float(_d2e(line).split('=')[1].strip()))
elif 'SPIN' in line:
spins.append(line.split('=')[1].strip())
elif 'OCC' in line:
mo_occ.append(float(_d2e(line.split('=')[1].strip())))
else:
orb.append(float(_d2e(line.split()[1])))
mo_energy = numpy.array(mo_energy)
mo_occ = numpy.array(mo_occ)
aoidx = numpy.argsort(order_ao_index(mol))
mo_coeff = (numpy.array(mo_coeff).T)[aoidx]
if mol.cart:
# Cartesian GTOs are normalized in molden format but they are not in pyscf
s = mol.intor('int1e_ovlp')
mo_coeff = numpy.einsum('i,ij->ij', numpy.sqrt(1/s.diagonal()), mo_coeff)
return mol, mo_energy, mo_coeff, mo_occ, irrep_labels, spins
def _parse_core(lines, envs):
mol = envs['mol']
atoms = envs['atoms']
line_id = 1
max_lines = len(lines)
for line in lines[1:]:
dat = line.split(':')
if dat[0].strip().isdigit():
atm_id = int(dat[0].strip()) - 1
nelec_core = int(dat[1].strip())
mol.ecp[atoms[atm_id][0]] = [nelec_core, []]
if mol.ecp:
        sys.stderr.write('\nECP were detected in the molden file.\n'
'Note Molden format does not support ECP data. '
'ECP information was lost when saving to molden format.\n\n')
return mol.ecp
_SEC_PARSER = {'GTO' : _parse_gto,
'N_ATOMS' : _parse_natoms,
'ATOMS' : _parse_atoms,
'CHARGE' : _parse_charge,
'MO' : _parse_mo,
'CORE' : _parse_core,
'MOLDEN FORMAT' : lambda *args: None,
}
def load(moldenfile, verbose=0):
'''Extract mol and orbitals from molden file
'''
with open(moldenfile, 'r') as f:
mol = gto.Mole()
mol.cart = True
tokens = {'natm' : None,
'unit' : lib.param.BOHR,
'mol' : mol,
'atoms' : None,
'basis' : None,
}
mo_section_count = 0
while True:
lines = _read_one_section(f)
sec_title = lines[0]
if sec_title is None:
break
sec_title = sec_title[1:sec_title.index(']')].upper()
if sec_title == 'MO':
res = _parse_mo(lines, tokens)
if mo_section_count == 0: # Alpha orbitals
mol, mo_energy, mo_coeff, mo_occ, irrep_labels, spins = res
else:
mo_energy = mo_energy , res[1]
mo_coeff = mo_coeff , res[2]
mo_occ = mo_occ , res[3]
irrep_labels = irrep_labels, res[4]
spins = spins , res[5]
mo_section_count += 1
elif sec_title in _SEC_PARSER:
_SEC_PARSER[sec_title.upper()](lines, tokens)
elif sec_title[:2] in ('5D', '7F', '9G'):
mol.cart = False
elif sec_title[:2] == '6D' or sec_title[:3] in ('10F', '15G'):
mol.cart = True
else:
sys.stderr.write('Unknown section %s\n' % sec_title)
        if mo_section_count == 1:
if spins[-1][0] == 'B': # If including beta orbitals
offset = spins.index(spins[-1])
mo_energy = mo_energy [:offset], mo_energy [offset:]
mo_coeff = mo_coeff [:offset], mo_coeff [offset:]
mo_occ = mo_occ [:offset], mo_occ [offset:]
irrep_labels = irrep_labels[:offset], irrep_labels[offset:]
spins = spins [:offset], spins [offset:]
if isinstance(mo_occ, tuple):
mol.spin = int(mo_occ[0].sum() - mo_occ[1].sum())
return mol, mo_energy, mo_coeff, mo_occ, irrep_labels, spins
parse = read = load
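# Editor's sketch (not part of the original module): reading a molden file back
# with load(). When the file contains both spins, mo_coeff/mo_energy/mo_occ come
# back as (alpha, beta) tuples, otherwise as plain arrays.
#
#     mol, mo_energy, mo_coeff, mo_occ, irrep_labels, spins = load('scf.molden')
#     if isinstance(mo_coeff, tuple):
#         mo_a, mo_b = mo_coeff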
def _d2e(token):
return token.replace('D', 'e').replace('d', 'e')
def header(mol, fout, ignore_h=IGNORE_H):
if ignore_h:
mol = remove_high_l(mol)[0]
fout.write('[Molden Format]\n')
fout.write('made by pyscf v[%s]\n' % pyscf.__version__)
fout.write('[Atoms] (AU)\n')
for ia in range(mol.natm):
symb = mol.atom_pure_symbol(ia)
chg = mol.atom_charge(ia)
fout.write('%s %d %d ' % (symb, ia+1, chg))
coord = mol.atom_coord(ia)
fout.write('%18.14f %18.14f %18.14f\n' % tuple(coord))
fout.write('[GTO]\n')
for ia, (sh0, sh1, p0, p1) in enumerate(mol.offset_nr_by_atom()):
fout.write('%d 0\n' %(ia+1))
for ib in range(sh0, sh1):
l = mol.bas_angular(ib)
nprim = mol.bas_nprim(ib)
nctr = mol.bas_nctr(ib)
es = mol.bas_exp(ib)
cs = mol.bas_ctr_coeff(ib)
for ic in range(nctr):
fout.write(' %s %2d 1.00\n' % (lib.param.ANGULAR[l], nprim))
for ip in range(nprim):
fout.write(' %18.14g %18.14g\n' % (es[ip], cs[ip,ic]))
fout.write('\n')
if mol.cart:
fout.write('[6d]\n[10f]\n[15g]\n')
else:
fout.write('[5d]\n[7f]\n[9g]\n')
if mol.has_ecp(): # See https://github.com/zorkzou/Molden2AIM
fout.write('[core]\n')
for ia in range(mol.natm):
nelec_ecp_core = mol.atom_nelec_core(ia)
if nelec_ecp_core != 0:
fout.write('%s : %d\n' % (ia+1, nelec_ecp_core))
fout.write('\n')
def order_ao_index(mol):
    # reorder d,f,g functions to
# 5D: D 0, D+1, D-1, D+2, D-2
# 6D: xx, yy, zz, xy, xz, yz
#
# 7F: F 0, F+1, F-1, F+2, F-2, F+3, F-3
# 10F: xxx, yyy, zzz, xyy, xxy, xxz, xzz, yzz, yyz, xyz
#
# 9G: G 0, G+1, G-1, G+2, G-2, G+3, G-3, G+4, G-4
# 15G: xxxx yyyy zzzz xxxy xxxz yyyx yyyz zzzx zzzy xxyy xxzz yyzz xxyz yyxz zzxy
idx = []
off = 0
if mol.cart:
for ib in range(mol.nbas):
l = mol.bas_angular(ib)
for n in range(mol.bas_nctr(ib)):
if l == 2:
idx.extend([off+0,off+3,off+5,off+1,off+2,off+4])
elif l == 3:
idx.extend([off+0,off+6,off+9,off+3,off+1,
off+2,off+5,off+8,off+7,off+4])
elif l == 4:
idx.extend([off+0 , off+10, off+14, off+1 , off+2 ,
off+6 , off+11, off+9 , off+13, off+3 ,
off+5 , off+12, off+4 , off+7 , off+8 ,])
elif l > 4:
raise RuntimeError('l=5 is not supported')
else:
idx.extend(range(off,off+(l+1)*(l+2)//2))
off += (l+1)*(l+2)//2
else: # spherical orbitals
for ib in range(mol.nbas):
l = mol.bas_angular(ib)
for n in range(mol.bas_nctr(ib)):
if l == 2:
idx.extend([off+2,off+3,off+1,off+4,off+0])
elif l == 3:
idx.extend([off+3,off+4,off+2,off+5,off+1,off+6,off+0])
elif l == 4:
idx.extend([off+4,off+5,off+3,off+6,off+2,
off+7,off+1,off+8,off+0])
elif l > 4:
raise RuntimeError('l=5 is not supported')
else:
idx.extend(range(off,off+l*2+1))
off += l * 2 + 1
return idx
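# Editor's sketch (not part of the original module): order_ao_index maps pyscf's
# AO ordering to the molden ordering documented above, so a coefficient matrix
# can be reordered by hand instead of going through orbital_coeff:
#
#     aoidx = order_ao_index(mol)
#     mo_in_molden_order = mo_coeff[aoidx, :]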
def remove_high_l(mol, mo_coeff=None):
    '''Remove high angular momentum (l >= 5) functions before dumping a molden file.
    If the molden functions raise the error ``RuntimeError l=5 is not supported``,
    you can use this function to reformat the orbitals.
    Note the reformatted orbitals may have normalization problems. Some visualization
    tools will complain about the orbital normalization error.
Examples:
>>> mol1, orb1 = remove_high_l(mol, mf.mo_coeff)
>>> molden.from_mo(mol1, outputfile, orb1)
'''
pmol = mol.copy()
pmol.basis = {}
for symb, bas in mol._basis.items():
pmol.basis[symb] = [b for b in bas if b[0] <= 4]
pmol.build(0, 0)
if mo_coeff is None:
return pmol, None
else:
p1 = 0
idx = []
for ib in range(mol.nbas):
l = mol.bas_angular(ib)
nc = mol.bas_nctr(ib)
if mol.cart:
nd = (l + 1) * (l + 2) // 2
else:
nd = l * 2 + 1
p0, p1 = p1, p1 + nd * nc
if l <= 4:
idx.append(range(p0, p1))
idx = numpy.hstack(idx)
return pmol, mo_coeff[idx]
if __name__ == '__main__':
from pyscf import scf
import tempfile
mol = gto.Mole()
mol.verbose = 5
mol.output = None#'out_gho'
mol.atom = [['C', (0.,0.,0.)],
['H', ( 1, 1, 1)],
['H', (-1,-1, 1)],
['H', ( 1,-1,-1)],
['H', (-1, 1,-1)], ]
mol.basis = {
'C': 'sto-3g',
'H': 'sto-3g'}
mol.build(dump_input=False)
m = scf.RHF(mol)
m.scf()
header(mol, mol.stdout)
print(order_ao_index(mol))
orbital_coeff(mol, mol.stdout, m.mo_coeff)
ftmp = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
from_mo(mol, ftmp.name, m.mo_coeff)
print(parse(ftmp.name))
|
gkc1000/pyscf
|
pyscf/tools/molden.py
|
Python
|
apache-2.0
| 17,740
|
[
"PySCF"
] |
9dd1da05fcd2c62378db5f1f19879c17962a0710c3fb39fada02fbaeff568c0c
|
#!/usr/bin/env python3
from __future__ import print_function
from glob import glob
from argparse import ArgumentParser
import os, re, sys
parser = ArgumentParser(prog='check-packages.py',
description="Check package table completeness")
parser.add_argument("-v", "--verbose",
action='store_const',
const=True, default=False,
help="Enable verbose output")
parser.add_argument("-d", "--doc",
help="Path to LAMMPS documentation sources")
parser.add_argument("-s", "--src",
help="Path to LAMMPS sources")
args = parser.parse_args()
verbose = args.verbose
src = args.src
doc = args.doc
if not args.src or not args.doc:
parser.print_help()
sys.exit(1)
if not os.path.isdir(src):
sys.exit("LAMMPS source path %s does not exist" % src)
if not os.path.isdir(doc):
sys.exit("LAMMPS documentation source path %s does not exist" % doc)
pkgdirs = glob(os.path.join(src, '[A-Z][A-Z]*'))
dirs = re.compile(".*/([0-9A-Z-]+)$")
user = re.compile("USER-.*")
stdpkg = []
usrpkg = []
# find package names and add to standard and user package lists.
# anything that starts with at least two upper case characters, is a
# folder, and is not one of DEPEND, MAKE, or STUBS is a package
for d in pkgdirs:
pkg = dirs.match(d).group(1)
if not os.path.isdir(os.path.join(src,pkg)): continue
if pkg in ['DEPEND','MAKE','STUBS']: continue
if user.match(pkg):
usrpkg.append(pkg)
else:
stdpkg.append(pkg)
print("Found %d standard and %d user packages" % (len(stdpkg),len(usrpkg)))
counter = 0
fp = open(os.path.join(doc,'Packages_standard.rst'))
text = fp.read()
fp.close()
matches = re.findall(':ref:`([A-Z0-9-]+) <[A-Z0-9-]+>`',text,re.MULTILINE)
for p in stdpkg:
if not p in matches:
        counter += 1
print("Standard package %s missing in Packages_standard.rst"
% p)
fp = open(os.path.join(doc,'Packages_user.rst'))
text = fp.read()
fp.close()
matches = re.findall(':ref:`([A-Z0-9-]+) <[A-Z0-9-]+>`',text,re.MULTILINE)
for p in usrpkg:
if not p in matches:
        counter += 1
print("User package %s missing in Packages_user.rst"
% p)
fp = open(os.path.join(doc,'Packages_details.rst'))
text = fp.read()
fp.close()
matches = re.findall(':ref:`([A-Z0-9]+) <PKG-\\1>`',text,re.MULTILINE)
for p in stdpkg:
if not p in matches:
        counter += 1
print("Standard package %s missing in Packages_details.rst"
% p)
matches = re.findall(':ref:`(USER-[A-Z0-9]+) <PKG-\\1>`',text,re.MULTILINE)
for p in usrpkg:
if not p in matches:
        counter += 1
print("User package %s missing in Packages_details.rst"
% p)
if counter:
print("Found %d issue(s) with package lists" % counter)
|
pastewka/lammps
|
doc/utils/check-packages.py
|
Python
|
gpl-2.0
| 2,743
|
[
"LAMMPS"
] |
daf0806b45f7e18334b4946abef16ea757c3a12bd1dce1a070311bf954f87b0f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# grid_monitor - Monitor page generator
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Creating the MiG monitor page"""
import os
import time
import datetime
from shared.conf import get_configuration_object
from shared.defaults import default_vgrid
from shared.fileio import unpickle
from shared.gridstat import GridStat
from shared.html import get_cgi_html_header, get_cgi_html_footer, themed_styles
from shared.output import format_timedelta
from shared.resource import anon_resource_id
from shared.vgrid import vgrid_list_vgrids
print """
Running grid monitor generator.
Set the MIG_CONF environment variable to the server configuration path
unless it is available in mig/server/MiGserver.conf
"""
configuration = get_configuration_object()
logger = configuration.logger
# Make sure that the default VGrid home used by monitor exists
default_vgrid_dir = os.path.join(configuration.vgrid_home, default_vgrid)
if not os.path.isdir(default_vgrid_dir):
try:
os.makedirs(default_vgrid_dir)
except OSError, ose:
logger.error('Failed to create default VGrid home: %s' % ose)
def create_monitor(vgrid_name):
"""Write monitor HTML file for vgrid_name"""
html_file = os.path.join(configuration.vgrid_home, vgrid_name,
'%s.html' % configuration.vgrid_monitor)
print 'collecting statistics for VGrid %s' % vgrid_name
sleep_secs = configuration.sleep_secs
slackperiod = configuration.slackperiod
now = time.asctime(time.localtime())
html_vars = {
'sleep_secs': sleep_secs,
'vgrid_name': vgrid_name,
'logo_url': '/images/logo.jpg',
'now': now,
'short_title': configuration.short_title,
}
html = get_cgi_html_header(
configuration,
'%(short_title)s Monitor, VGrid %(vgrid_name)s' % html_vars,
'',
True,
'''<meta http-equiv="refresh" content="%(sleep_secs)s" />
''' % html_vars,
themed_styles(configuration),
'''
<script type="text/javascript" src="/images/js/jquery.js"></script>
<script type="text/javascript" src="/images/js/jquery.tablesorter.js"></script>
<script type="text/javascript" >
$(document).ready(function() {
// table initially sorted by col. 1 (name)
var sortOrder = [[1,0]];
// use image path for sorting if there is any inside
var imgTitle = function(contents) {
var key = $(contents).find("a").attr("class");
if (key == null) {
key = $(contents).html();
}
return key;
}
$("table.monitor").tablesorter({widgets: ["zebra"],
textExtraction: imgTitle,
});
$("table.monitor").each(function () {
try {
$(this).trigger("sorton", [sortOrder]);
} catch(err) {
/* tablesorter chokes on empty tables - just continue */
}
});
}
);
</script>
''',
'',
False,
)
html += \
'''
<!-- end of raw header: this line is used by showvgridmonitor -->
<h1>Statistics/monitor for the %(vgrid_name)s VGrid</h1>
<div class="generatornote smallcontent">
This page was generated %(now)s (automatic refresh every %(sleep_secs)s secs).
</div>
'''\
% html_vars
# loop and get totals
parse_count = 0
queued_count = 0
frozen_count = 0
executing_count = 0
finished_count = 0
failed_count = 0
retry_count = 0
canceled_count = 0
cpucount_requested = 0
cpucount_done = 0
nodecount_requested = 0
nodecount_done = 0
cputime_requested = 0
cputime_done = 0
used_walltime = 0
disk_requested = 0
disk_done = 0
memory_requested = 0
memory_done = 0
runtimeenv_dict = {'': 0}
runtimeenv_requested = 0
runtimeenv_done = 0
number_of_jobs = 0
up_count = 0
down_count = 0
slack_count = 0
job_assigned = 0
job_assigned_cpus = 0
gstat = GridStat(configuration, logger)
runtimeenv_dict = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'RUNTIMEENVIRONMENT', {})
parse_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'PARSE')
queued_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'QUEUED')
frozen_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'FROZEN')
executing_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'EXECUTING')
failed_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'FAILED')
retry_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'RETRY')
canceled_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'CANCELED')
expired_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'EXPIRED')
finished_count = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'FINISHED')
nodecount_requested = gstat.get_value(gstat.VGRID,
vgrid_name.upper(), 'NODECOUNT_REQ')
nodecount_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'NODECOUNT_DONE')
cputime_requested = gstat.get_value(gstat.VGRID,
vgrid_name.upper(), 'CPUTIME_REQ')
cputime_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'CPUTIME_DONE')
used_walltime = gstat.get_value(gstat.VGRID,
vgrid_name.upper(),
'USED_WALLTIME')
if (used_walltime == 0):
used_walltime = datetime.timedelta(0)
used_walltime = format_timedelta(used_walltime)
disk_requested = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'DISK_REQ')
disk_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'DISK_DONE')
memory_requested = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'MEMORY_REQ')
memory_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'MEMORY_DONE')
cpucount_requested = gstat.get_value(gstat.VGRID,
vgrid_name.upper(), 'CPUCOUNT_REQ')
cpucount_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'CPUCOUNT_DONE')
runtimeenv_requested = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'RUNTIMEENVIRONMENT_REQ')
runtimeenv_done = gstat.get_value(gstat.VGRID, vgrid_name.upper(),
'RUNTIMEENVIRONMENT_DONE')
number_of_jobs = parse_count
number_of_jobs += queued_count
number_of_jobs += frozen_count
number_of_jobs += expired_count
number_of_jobs += canceled_count
number_of_jobs += failed_count
number_of_jobs += executing_count
number_of_jobs += finished_count
number_of_jobs += retry_count
html_vars = {
'parse_count': parse_count,
'queued_count': queued_count,
'frozen_count': frozen_count,
'executing_count': executing_count,
'failed_count': failed_count,
'retry_count': retry_count,
'canceled_count': canceled_count,
'expired_count': expired_count,
'finished_count': finished_count,
'number_of_jobs': number_of_jobs,
'cpucount_requested': cpucount_requested,
'cpucount_done': cpucount_done,
'nodecount_requested': nodecount_requested,
'nodecount_done': nodecount_done,
'cputime_requested': cputime_requested,
'cputime_done': cputime_done,
'used_walltime': used_walltime,
'disk_requested': disk_requested,
'disk_done': disk_done,
'memory_requested': memory_requested,
'memory_done': memory_done,
'runtimeenv_requested': runtimeenv_requested,
'runtimeenv_done': runtimeenv_done,
}
html += \
"""<h2>Job Stats</h2><table class=monitorstats><tr><td>
<table class=monitorjobs><tr class=title><td>Job State</td><td>Number of jobs</td></tr>
<tr><td>Parse</td><td>%(parse_count)s</td></tr>
<tr><td>Queued</td><td>%(queued_count)s</td></tr>
<tr><td>Frozen</td><td>%(frozen_count)s</td></tr>
<tr><td>Executing</td><td>%(executing_count)s</td></tr>
<tr><td>Failed</td><td>%(failed_count)s</td></tr>
<tr><td>Retry</td><td>%(retry_count)s</td></tr>
<tr><td>Canceled</td><td>%(canceled_count)s</td></tr>
<tr><td>Expired</td><td>%(expired_count)s</td></tr>
<tr><td>Finished</td><td>%(finished_count)s</td></tr>
<tr><td>Total</td><td>%(number_of_jobs)s</td></tr>
</table>
</td><td>
<table class=monitorresreq>
<tr class=title><td>Requirement</td><td>Requested</td><td>Done</td></tr>
<tr><td>Cpucount</td><td>%(cpucount_requested)s</td><td>%(cpucount_done)s</td></tr>
<tr><td>Nodecount</td><td>%(nodecount_requested)s</td><td>%(nodecount_done)s</td></tr>
<tr><td>Cputime</td><td>%(cputime_requested)s</td><td>%(cputime_done)s</td></tr>
<tr><td>GB Disk</td><td>%(disk_requested)s</td><td>%(disk_done)s</td></tr>
<tr><td>MB Memory</td><td>%(memory_requested)s</td><td>%(memory_done)s</td></tr>
<tr><td>Runtime Envs</td><td>%(runtimeenv_requested)s</td><td>%(runtimeenv_done)s</td></tr>
<tr><td>Used Walltime</td><td colspan='2'>%(used_walltime)s</td></tr>
</table><br />
</td><td>
<div class=monitorruntimeenvdetails>
<table class=monitorruntimeenvdone>
<tr class=title><td>Runtime Envs Done</td><td></td></tr>
"""\
% html_vars
if len(runtimeenv_dict.keys()) < 1:
# No runtimeenv requests
html += '<tr><td></td><td>-</td></tr>\n'
else:
for entry in runtimeenv_dict.keys():
if not entry == '':
html += '<tr><td>' + entry + '</td><td>'\
+ str(runtimeenv_dict[entry]) + '</td></tr>\n'
total_number_of_exe_resources, total_number_of_store_resources = 0, 0
    total_number_of_exe_cpus, total_number_of_store_gigs = 0, 0
vgrid_name_list = vgrid_name.split('/')
current_dir = ''
exes, stores = '', ''
for vgrid_name_part in vgrid_name_list:
current_dir = os.path.join(current_dir, vgrid_name_part)
abs_mon_dir = os.path.join(configuration.vgrid_home, current_dir)
# print 'dir: %s' % abs_mon_dir
# Potential race - just ignore if it disappeared
try:
sorted_names = os.listdir(abs_mon_dir)
except OSError:
continue
sorted_names.sort()
for filename in sorted_names:
# print filename
if filename.startswith('monitor_last_request_'):
# read last request helper file
mon_file_name = os.path.join(abs_mon_dir, filename)
print 'found ' + mon_file_name
last_request_dict = unpickle(mon_file_name, logger)
if not last_request_dict:
print 'could not open and unpickle: '\
+ mon_file_name
continue
difference = datetime.datetime.now()\
- last_request_dict['CREATED_TIME']
days = str(difference.days)
hours = str(difference.seconds / 3600)
minutes = str((difference.seconds % 3600) / 60)
seconds = str((difference.seconds % 60) % 60)
if last_request_dict.has_key('CPUTIME'):
cputime = last_request_dict['CPUTIME']
elif last_request_dict.has_key('cputime'):
cputime = last_request_dict['cputime']
else:
print 'ERROR: last request does not contain cputime field!: %s'\
% last_request_dict
continue
try:
cpusec = int(cputime)
except ValueError:
try:
cpusec = int(float(cputime))
except ValueError, verr:
                        print 'ERROR: failed to parse cputime %s: %s'\
                            % (cputime, verr)
                        continue
# Include execution delay guesstimate for strict fill
# LRMS resources
try:
delay = int(last_request_dict['EXECUTION_DELAY'])
except KeyError:
delay = 0
except ValueError:
delay = 0
time_remaining = (last_request_dict['CREATED_TIME']
+ datetime.timedelta(seconds=cpusec)
+ datetime.timedelta(seconds=delay))\
- datetime.datetime.now()
days_rem = str(time_remaining.days)
hours_rem = str(time_remaining.seconds / 3600)
minutes_rem = str((time_remaining.seconds % 3600) / 60)
seconds_rem = str((time_remaining.seconds % 60) % 60)
if time_remaining.days < -7:
try:
                        print 'removing: %s as it has not been seen for %s days.'\
                            % (mon_file_name, abs(time_remaining).days)
os.remove(mon_file_name)
except Exception, err:
print "could not remove: '%s' Error: %s"\
% (mon_file_name, str(err))
pass
else:
unique_res_name_and_exe_list = \
filename.split('monitor_last_request_', 1)
if cpusec == 0:
resource_status = 'unavailable'
elif time_remaining.days < 0:
# time_remaining.days < 0 means that we have passed the specified time
time_rem_abs = abs(time_remaining)
if time_rem_abs.days == 0\
and int(time_rem_abs.seconds)\
< int(slackperiod):
resource_status = 'slack'
slack_count = slack_count + 1
else:
resource_status = 'offline'
down_count = down_count + 1
else:
resource_status = 'online'
up_count = up_count + 1
exes += '<tr>'
exes += \
'<td><img src=/images/status-icons/%s.png /></td>'\
% resource_status
public_id = unique_res_name_and_exe_list[1]
if last_request_dict['RESOURCE_CONFIG'].get('ANONYMOUS', True):
public_id = anon_resource_id(public_id)
public_name = last_request_dict['RESOURCE_CONFIG'].get('PUBLICNAME', '')
resource_parts = public_id.split('_', 2)
resource_name = "<a href='viewres.py?unique_resource_name=%s'>%s</a>" % \
(resource_parts[0], resource_parts[0])
if public_name:
resource_name += "<br />(alias %s)" % public_name
else:
resource_name += "<br />(no alias)"
resource_name += "<br />%s" % resource_parts[1]
exes += '<td>%s</td>' % resource_name
exes += '<td>%s<br />(%sd %sh %sm %ss ago)</td>' % \
(time.asctime(last_request_dict['CREATED_TIME'].timetuple()),
days, hours, minutes, seconds)
exes += '<td>' + vgrid_name + '</td>'
runtime_envs = last_request_dict['RESOURCE_CONFIG'
]['RUNTIMEENVIRONMENT']
re_list_text = ', '.join([i[0] for i in runtime_envs])
exes += '<td title="%s">' % re_list_text \
+ str(len(runtime_envs)) + '</td>'
exes += '<td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['CPUTIME']) + '</td><td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['NODECOUNT']) + '</td><td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['CPUCOUNT']) + '</td><td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['DISK']) + '</td><td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['MEMORY']) + '</td><td>'\
+ str(last_request_dict['RESOURCE_CONFIG'
]['ARCHITECTURE']) + '</td>'
exes += '<td>' + last_request_dict['STATUS']\
+ '</td><td>' + str(last_request_dict['CPUTIME'
]) + '</td>'
exes += '<td class=status_%s>' % resource_status
if 'unavailable' == resource_status:
exes += '-'
elif 'slack' == resource_status:
exes += 'Within slack period (%s < %s secs)'\
% (time_rem_abs.seconds, slackperiod)
elif 'offline' == resource_status:
exes += 'down?'
else:
exes += '%sd, %sh, %sm, %ss'\
% (days_rem, hours_rem, minutes_rem,
seconds_rem)
exes += '</td>'
exes += '</tr>\n'
if last_request_dict['STATUS'] == 'Job assigned':
job_assigned = job_assigned + 1
job_assigned_cpus = job_assigned_cpus\
+ int(last_request_dict['RESOURCE_CONFIG'
]['NODECOUNT'])\
* int(last_request_dict['RESOURCE_CONFIG'
]['CPUCOUNT'])
total_number_of_exe_resources += 1
total_number_of_exe_cpus += int(
last_request_dict['RESOURCE_CONFIG']['NODECOUNT']) \
* int(last_request_dict['RESOURCE_CONFIG']['CPUCOUNT'])
elif filename.startswith('monitor_last_status_'):
# store must be linked to this vgrid, not only parent vgrid:
# inheritance only covers access, not automatic participation
if current_dir != vgrid_name:
continue
# read last resource action status file
mon_file_name = os.path.join(abs_mon_dir, filename)
print 'found ' + mon_file_name
last_status_dict = unpickle(mon_file_name, logger)
if not last_status_dict:
print 'could not open and unpickle: '\
+ mon_file_name
continue
difference = datetime.datetime.now()\
- last_status_dict['CREATED_TIME']
days = str(difference.days)
hours = str(difference.seconds / 3600)
minutes = str((difference.seconds % 3600) / 60)
seconds = str((difference.seconds % 60) % 60)
if last_status_dict['STATUS'] == 'stopped':
time_stopped = datetime.datetime.now() - \
last_status_dict['CREATED_TIME']
if time_stopped.days > 7:
try:
                            print 'removing: %s as it has not been seen for %s days.'\
                                % (mon_file_name, abs(time_stopped).days)
os.remove(mon_file_name)
except Exception, err:
print "could not remove: '%s' Error: %s"\
% (mon_file_name, str(err))
continue
unique_res_name_and_store_list = filename.split(
'monitor_last_status_', 1)
mount_point = last_status_dict.get('MOUNT_POINT', 'UNKNOWN')
is_live = os.path.ismount(mount_point)
public_id = unique_res_name_and_store_list[1]
if last_status_dict['RESOURCE_CONFIG'].get('ANONYMOUS', True):
public_id = anon_resource_id(public_id)
vgrid_link = os.path.join(
configuration.vgrid_files_home, vgrid_name, public_id)
is_linked = (os.path.realpath(vgrid_link) == mount_point)
total_disk = last_status_dict['RESOURCE_CONFIG']['DISK']
free_disk, avail_disk, used_disk, used_percent = 0, 0, 0, 0
gig_bytes = 1.0 * 2**30
# Fall back status - show last action unless statvfs succeeds
store_status = '<td>%s %s<br />(%sd %sh %sm %ss ago)</td>' % \
(last_status_dict['STATUS'],
time.asctime(last_status_dict['CREATED_TIME'].timetuple()),
days, hours, minutes, seconds)
# These disk stats are slightly confusing but match 'df'
# 'available' is the space that can actually be used so it
# is typically less than 'free'.
try:
disk_stats = os.statvfs(mount_point)
total_disk = disk_stats.f_bsize * disk_stats.f_blocks / \
gig_bytes
avail_disk = disk_stats.f_bsize * disk_stats.f_bavail / \
gig_bytes
free_disk = disk_stats.f_bsize * disk_stats.f_bfree / \
gig_bytes
used_disk = total_disk - free_disk
used_percent = 100.0 * used_disk / (avail_disk + used_disk)
store_status = '<td>%s %s<br />(%sd %sh %sm %ss ago)</td>' % \
('checked', time.asctime(), 0, 0, 0, 0)
except OSError, ose:
print 'could not stat mount point %s: %s' % \
(mount_point, ose)
is_live = False
if last_status_dict['STATUS'] == 'stopped':
resource_status = 'offline'
down_count = down_count + 1
elif last_status_dict['STATUS'] == 'started':
if is_live and is_linked:
resource_status = 'online'
up_count = up_count + 1
else:
resource_status = 'slack'
down_count = down_count + 1
else:
resource_status = 'unknown'
stores += '<tr>'
stores += \
'<td><img src=/images/status-icons/%s.png /></td>'\
% resource_status
public_name = last_status_dict['RESOURCE_CONFIG'].get('PUBLICNAME', '')
resource_parts = public_id.split('_', 2)
resource_name = "<a href='viewres.py?unique_resource_name=%s'>%s</a>" % \
(resource_parts[0], resource_parts[0])
if public_name:
resource_name += "<br />(alias %s)" % public_name
else:
resource_name += "<br />(no alias)"
resource_name += "<br />%s" % resource_parts[1]
stores += '<td>%s</td>' % resource_name
stores += store_status
stores += '<td>' + vgrid_name + '</td>'
stores += '<td>%d</td>' % total_disk
stores += '<td>%d</td>' % used_disk
stores += '<td>%d</td>' % avail_disk
stores += '<td>%d</td>' % used_percent
stores += '<td class=status_%s>' % resource_status
stores += resource_status + '</td>'
stores += '</tr>\n'
total_number_of_store_resources += 1
total_number_of_store_gigs += total_disk
html += """</table>
</div>
</td></tr>
</table>
<h2>Resource Job Requests</h2>
Listing the last request from each resource<br />
<br />
<table class="monitor columnsort">
<thead class="title">
<tr>
<th class="icon"><!-- Status icon --></th>
<th>Resource ID, unit</th>
<th>Last seen</th>
<th>VGrid</th>
<th>Runtime envs</th>
<th>CPU time (s)</th>
<th>Node count</th>
<th>CPU count</th>
<th>Disk (GB)</th>
<th>Memory (MB)</th>
<th>Arch</th>
<th>Status</th>
<th>Job (s)</th>
<th>Remaining</th>
</tr>
</thead>
<tbody>
"""
html += exes
html += '</tbody>\n</table>\n'
html += """
<h2>Resource Storage</h2>
Listing the last check for each resource<br />
<br />
<table class="monitor columnsort">
<thead class="title">
<tr>
<th class="icon"><!-- Status icon --></th>
<th>Resource ID, unit</th>
<th>Last Status</th>
<th>VGrid</th>
<th>Total Disk (GB)</th>
<th>Used Disk (GB)</th>
<th>Available Disk (GB)</th>
<th>Disk Use %</th>
<th>Status</th>
</tr>
</thead>
<tbody>
"""
html += stores
html += '</tbody>\n</table>\n'
html += '''
<h2>VGrid Totals</h2>
A total of <b>'''\
+ str(total_number_of_exe_resources) + '</b> exe resources ('\
+ str(total_number_of_exe_cpus) + " cpu's) and <b>"\
+ str(total_number_of_store_resources) + '</b> store resources ('\
+ str(int(total_number_of_store_gigs)) + " GB) joined this VGrid ("\
+ str(up_count) + ' up, ' + str(down_count) + ' down?, '\
+ str(slack_count) + ' slack)<br />'
html += str(job_assigned) + ' exe resources (' + str(job_assigned_cpus)\
+ """ cpu's) appear to be executing a job<br />
<br />
"""
html += \
'<!-- begin raw footer: this line is used by showvgridmonitor -->'
html += get_cgi_html_footer(configuration, '')
try:
file_handle = open(html_file, 'w')
file_handle.write(html)
file_handle.close()
except Exception, exc:
print 'Could not write monitor page %s: %s' % (html_file, exc)
while True:
(status, vgrids_list) = vgrid_list_vgrids(configuration)
# create global statistics ("")
# vgrids_list.append("")
print 'Updating cache.'
grid_stat = GridStat(configuration, logger)
grid_stat.update()
for vgrid_name in vgrids_list:
print 'creating monitor for vgrid: %s' % vgrid_name
create_monitor(vgrid_name)
print 'sleeping for %s seconds' % configuration.sleep_secs
time.sleep(float(configuration.sleep_secs))
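# Editor's sketch (not part of the original script): the monitor is a long-running
# process; per the startup message above it is typically launched with the server
# configuration exported first, e.g. (path is a placeholder)
#
#     MIG_CONF=/path/to/MiGserver.conf python grid_monitor.py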
|
heromod/migrid
|
mig/server/grid_monitor.py
|
Python
|
gpl-2.0
| 28,123
|
[
"Brian"
] |
cdf765e8dac6258ab6294e78c9a00d388759db0f80d949476e24d27700a67efa
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
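# Editor's note (not part of the original file): add_newdoc only records text;
# nothing is attached to scipy.special here. For example, a call such as
#
#     add_newdoc("scipy.special", "gamma", "gamma(z)\n\nGamma function.")
#
# stores the string in docdict under the key "scipy.special.gamma", and
# generate_ufuncs.py later retrieves it with get("scipy.special.gamma") at
# build time.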
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
           http://netlib.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
    Computation of `k` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
    Computation of `n` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
    btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt{1 - m}
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
The computation uses the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. Here `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
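Examples
--------
A short sketch checking the identity ``sn**2 + cn**2 = 1`` (assumes
NumPy is available):
>>> import numpy as np
>>> from scipy.special import ellipj
>>> sn, cn, dn, ph = ellipj(0.3, 0.81)
>>> bool(np.allclose(sn**2 + cn**2, 1.0))
True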
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
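Examples
--------
At ``p = 1`` (i.e. ``m = 0``) the integral reduces to ``pi/2``; a quick
check (a sketch, assuming NumPy is available):
>>> import numpy as np
>>> from scipy.special import ellipkm1
>>> bool(np.isclose(ellipkm1(1.0), np.pi/2))
True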
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the standard normal distribution is
given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
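Examples
--------
A small sketch checking the trigonometric identity
``T_n(cos(theta)) = cos(n*theta)`` (assumes NumPy is available):
>>> import numpy as np
>>> from scipy.special import eval_chebyt
>>> theta = np.linspace(0, np.pi, 5)
>>> bool(np.allclose(eval_chebyt(3, np.cos(theta)), np.cos(3*theta)))
True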
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebychev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
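Examples
--------
A brief sketch comparing against the explicit formula
:math:`P_2(x) = (3x^2 - 1)/2` (assumes NumPy is available):
>>> import numpy as np
>>> from scipy.special import eval_legendre
>>> x = np.linspace(-1, 1, 5)
>>> bool(np.allclose(eval_legendre(2, x), (3*x**2 - 1)/2))
True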
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
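Examples
--------
A minimal sketch (assumes NumPy is available): ``expit(0) = 0.5``, and
`expit` undoes `logit`:
>>> import numpy as np
>>> from scipy.special import expit, logit
>>> float(expit(0.0))
0.5
>>> p = np.array([0.25, 0.5, 0.75])
>>> bool(np.allclose(expit(logit(p)), p))
True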
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
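Examples
--------
A short sketch of why this helps near zero (assumes NumPy is
available): the naive formula loses accuracy to cancellation for tiny
`x`, while `exprel` stays close to the limiting value of 1:
>>> import numpy as np
>>> from scipy.special import exprel
>>> float((np.exp(1e-16) - 1) / 1e-16)   # catastrophic cancellation
0.0
>>> bool(np.isclose(exprel(1e-16), 1.0))
True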
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
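Examples
--------
A quick round-trip check against `fdtr` (a sketch; assumes NumPy is
available):
>>> import numpy as np
>>> from scipy.special import fdtr, fdtri
>>> x = fdtri(5.0, 7.0, 0.9)
>>> bool(np.allclose(fdtr(5.0, 7.0, x), 0.9))
True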
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
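Examples
--------
A minimal check of the stated relation with `gammaincc` (a sketch;
assumes NumPy is available):
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincc
>>> a, x = 2.5, 1.3
>>> bool(np.allclose(gammainc(a, x) + gammaincc(a, x), 1.0))
True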
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
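Examples
--------
A short round-trip check (a sketch; assumes NumPy is available):
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincinv
>>> x = gammaincinv(1.5, 0.7)
>>> bool(np.allclose(gammainc(1.5, x), 0.7))
True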
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
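Examples
--------
A small sketch showing the quadratic and linear regimes; the values
follow directly from the piecewise definition above:
>>> from scipy.special import huber
>>> float(huber(1.0, 0.5))   # |r| <= delta: 0.5 * r**2
0.125
>>> float(huber(1.0, 3.0))   # |r| > delta: delta * (|r| - delta/2)
2.5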
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, z)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`z f''(z) + v f'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
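Examples
--------
A brief sketch checking the scaling relation against `iv` for real
positive argument (assumes NumPy is available):
>>> import numpy as np
>>> from scipy.special import iv, ive
>>> v, z = 1.5, 3.0
>>> bool(np.allclose(ive(v, z), iv(v, z) * np.exp(-abs(z))))
True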
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
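Examples
--------
A minimal check (a sketch; assumes NumPy is available): `j0` agrees
with the general-order routine `jv` at order 0:
>>> import numpy as np
>>> from scipy.special import j0, jv
>>> x = np.linspace(0, 10, 5)
>>> bool(np.allclose(j0(x), jv(0, x)))
True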
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `x`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the terms 0 through `k` of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc(k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the gamma function
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z)
Spence's function, also known as the dilogarithm. It is defined to
be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Note that there is a different convention which defines Spence's
function by the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (x/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (x/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggamma`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
| gdooper/scipy | scipy/special/add_newdocs.py | Python | bsd-3-clause | 154,618 | ["Gaussian"] | d034452b3148837754e323d4a1138ffdea6fe0cf6814e4f6172e1407977d83e2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Use the Leaf algorithm to remove redundancy from a set of protein sequences.
The algorithm is published in the following paper:
1. Bull, S. C., Muldoon, M. R. & Doig, A. J. Maximising the Size of Non-Redundant Protein Datasets Using Graph Theory. PLoS One 8, (2013).
Simon Bull provides an implementation for Python 3, hosted at https://github.com/SimonCB765/Leaf
This implementation is for Python 2.7.
The script reads a multiple-alignment file in FASTA format, computes pairwise similarities, and removes redundancy according to a similarity cutoff.
You can optionally use igraph to plot the network.
usage: python leaf_seq.py test.fa
dependent packages: Biopython, python-igraph
"""
import os
import sys
import igraph
import numpy as np
from multiprocessing import Pool
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
def read_fa(fa_f):
# read in seqs in fasta format
# seqs format: [(pro, seq), ...]
with open(fa_f) as o_f:
lines = o_f.readlines()
lines = [line.rstrip('\r\n') for line in lines]
pro_line_num = [i for i, line in enumerate(
lines) if '>' in line] + [len(lines)]
seqs = [lines[n:pro_line_num[i + 1]]
for i, n in enumerate(pro_line_num[:-1])]
seqs = [(seq[0][1:], ''.join(seq[1:])) for seq in seqs]
return seqs
def align(p):
s1,s2,seq1,seq2 = p
matrix = matlist.blosum62
gap_open = -10 # usual value
gap_extend = -0.5 # usual value
alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)
seq1 = alns[0][0]
seq2 = alns[0][1]
identity = [1 for i, s in enumerate(seq1) if s == seq2[i]]
identity = 1.0 * len(identity)/ len(seq1)
return s1,s2,float('{0:<4.2f}'.format(identity))
def get_similarity(seqs):
seq_pairs = []
seq_num = len(seqs)
for i in range(seq_num):
for j in range(seq_num):
if j > i:
seq_pairs.append((i,j,seqs[i][1],seqs[j][1]))
p = Pool(6)
results = p.map(align,seq_pairs)
p.close()
results = sorted(results)
scores = np.ones(shape=(seq_num,seq_num))
for i,j,s in results:
scores[i][j] = s
scores[j][i] = s
return scores
def leaf(labels, similarities, cutoff):
print 'before leaf ',len(labels)
matrix = [map(lambda x: 1 if x > cutoff else 0, row)
for row in similarities]
for i in range(len(matrix)):
matrix[i][i] = 0
# use igraph to plot the initial network
# graph = igraph.Graph.Adjacency(matrix, mode='undirected')
# igraph.plot(graph, filename + '.png', vertex_label=range(len(labels)))
adjlist = [[i for i,n in enumerate(row ) if n] for row in matrix]
neighbors = []
remove = []
# for i,a in enumerate(adjlist):
# print '{0}:{1},'.format(i,a)
# transform adjlist to set
neighbors = [set(n) for i, n in enumerate(adjlist)]
# detect possible max clique
max_neighbors = max(len(l) for l in neighbors)
# the possible clique size is 2 to max_neighbors+1, so the possible
# neighborsize is 1 to max_neighbors
for clique_num in range(1, max_neighbors + 1):
nodes_index = set([i for i, l in enumerate(
neighbors) if len(l) == clique_num])
for i in nodes_index:
if not i in remove: # do not compute removed vertex
# a clique is set of vertex connecting to each other
nodesofinterest = neighbors[i].union([i])
# print 'initial nodesofinterest: ',nodesofinterest
if set.intersection(*[neighbors[i].union([i]) for i in nodesofinterest]) == nodesofinterest:
# print 'clique nodesofinterest: ',nodesofinterest
# detect vertex without linking to outside vertex
in_clique = [i for i in nodesofinterest if not neighbors[
i].union([i]).difference(nodesofinterest)]
# keep one of the vertex without linking to outside vertex,
# remove rest
if in_clique:
# print 'in_clique: ',in_clique
keep = [in_clique[0]]
# print 'keep: ',keep
remove_iter = nodesofinterest.difference(set(keep))
# print 'remove_iter: ',remove_iter
for r in remove_iter:
if not r in remove: # do not compute removed vertex
# print 'remove: ',r
for i in range(len(neighbors)):
if r in neighbors[i]:
neighbors[i].remove(r)
remove += remove_iter
# print 'after leaf: ',neighbors
nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
nr_matrix = [[row[i] for i in range(
len(matrix)) if not i in remove] for row in nr_matrix]
# graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
nr_labels = [i for i in range(len(matrix)) if not i in remove]
# igraph.plot(graph, filename + '_leaf.png', vertex_label=nr_labels)
# continue to remove the one with most neighbors until no vertex has
# neighbors, removed vertex is not considered
while max([len(r) for i, r in enumerate(neighbors) if not i in remove]) > 0:
max_index = max([(len(r), i) for i, r in enumerate(neighbors) if not i in remove])[1]
# print 'remove: ',max_index
remove.append(max_index)
for i in set(range(len(neighbors))).difference(set(remove)): # do not compute remove vertex
if max_index in neighbors[i]:
neighbors[i].remove(max_index)
# print 'final remove: ',remove
nr_matrix = [matrix[i] for i in range(len(matrix)) if not i in remove]
nr_matrix = [[row[i] for i in range(
len(matrix)) if not i in remove] for row in nr_matrix]
nr_labels = [i for i in range(len(matrix)) if not i in remove]
# plot non-redundant network
# graph = igraph.Graph.Adjacency(nr_matrix, mode='undirected')
# igraph.plot(graph, filename + '_nr.png', vertex_label=nr_labels)
nr_similarities = [similarities[i] for i in range(len(similarities)) if not i in remove]
nr_similarities = [[row[i] for i in range(
len(similarities)) if not i in remove] for row in nr_similarities]
nr_labels = [labels[i] for i in range(len(similarities)) if not i in remove]
print 'after leaf ',len(nr_labels)
return nr_labels, nr_similarities
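# Tiny illustrative example (hypothetical toy data, added for clarity):
# labels = ['a', 'b', 'c']
# sims = [[1.0, 0.95, 0.2],
#         [0.95, 1.0, 0.3],
#         [0.2, 0.3, 1.0]]
# With cutoff=0.9 only 'a' and 'b' are linked, so one of them is removed and
# leaf(labels, sims, 0.9) keeps ['a', 'c'] under the "keep the first clique
# member" rule used above.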
def leaf_seqs(seqs,cutoff=0.9):
seqnames = [seq[0] for seq in seqs]
similarities = get_similarity(seqs)
nr_names,nr_similarities = leaf(seqnames, similarities, cutoff)
nr_seqs = [seq for seq in seqs if seq[0] in nr_names]
return nr_seqs
def main():
seqs = read_fa(sys.argv[-1])
filename = os.path.splitext(os.path.split(sys.argv[-1])[1])[0]
for cutoff in [0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95]:
# for cutoff in [0.8]:
nr_seqs = leaf_seqs(seqs,cutoff)
with open(filename+'_nr_seqs_'+str(cutoff)+'.fas','w') as w_f:
for pro,seq in nr_seqs:
print >> w_f,'>{0}'.format(pro)
print >> w_f,'{0}'.format(seq)
if __name__ == "__main__":
main()
|
lituan/tools
|
leaf_seq.py
|
Python
|
cc0-1.0
| 7,347
|
[
"Biopython"
] |
48a0d61e51ea651ea2614e45de582da7487d63d5c37a9476f332dac4902948ac
|
#!/usr/bin/python3
import sys, os
import time
import argparse
import json
import curses
import logging
import configparser
import subprocess
import urllib
from urllib.request import urlopen
from urllib.parse import quote_plus
import gettext
from monitor import Colors
from monitor import WarningWindow
from monitor import MainWindow
from monitor import Rule
from monitor import utils
from monitor.core.utils import tr
def load_rule_module(module_filename):
''' Loading custom rules (see example rules.py for usage).
Custom rules module is loaded from $XDG_DATA_HOME/pygod/rules.py
'''
if not os.path.isfile(module_filename):
return []
import types
module_name = os.path.splitext(os.path.basename(module_filename))[0]
is_function = lambda var: isinstance(var, types.FunctionType)
try:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_filename)
custom_rules_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(custom_rules_module)
except AttributeError:
from importlib.machinery import SourceFileLoader
custom_rules_module = SourceFileLoader(module_name, module_filename).load_module()
public_objects = [name for name in dir(custom_rules_module) if not name.startswith('_')]
return list(filter(is_function, map(custom_rules_module.__dict__.get, public_objects)))
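# Sketch of what a custom rule in rules.py might look like (hypothetical,
# based only on how CUSTOM_RULES is consumed in init_status_checkers below:
# the function is called once with None to obtain an optional warning text and
# later with the hero state dict to decide whether the warning fires):
# def low_health(state):
#     if state is None:
#         return 'Health is critically low!'
#     return state.get('health', 1) < 10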
# Basic custom rules.
CUSTOM_RULE_MODULE = os.path.join(utils.get_data_dir(), "rules.py")
CUSTOM_RULES = load_rule_module(CUSTOM_RULE_MODULE)
def fetch_remote_state(godname, token=None):
url = 'http://godville.net/gods/api/{0}'.format(quote_plus(godname))
if token:
url += '/{0}'.format(token)
connection = urlopen(url)
if connection is None or connection.getcode() == 404:
old_url = 'http://godville.net/gods/api/{0}.json'.format(quote_plus(godname))
logging.error(
'load_hero_state: new api url %s returned 404\n'
' will try old api url %s',
url, old_url)
connection = urlopen(old_url)
return connection.read().decode('utf-8')
def load_hero_state(godname, token=None, filename=None):
state = None
if filename:
with open(filename, 'rb') as f:
state = f.read().decode('utf-8')
else:
state = fetch_remote_state(godname, token)
state = json.loads(state)
if 'health' not in state:
if token:
state['token_expired'] = True
# Public API only, some keys might be not available.
state['health'] = state['max_health']
state['exp_progress'] = '...'
state['distance'] = '...'
state['inventory_num'] = '...'
state['quest'] = tr('Generate secret token on https://godville.net/user/profile')
state['quest_progress'] = '...'
state['diary_last'] = ''
return state
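# Hedged sketch of a minimal state dict for offline testing with --state,
# using only keys referenced in this module (real API responses contain many
# more fields):
# {"health": 25, "max_health": 40, "exp_progress": 10, "distance": 3,
#  "inventory_num": 5, "quest": "...", "quest_progress": 50, "diary_last": ""}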
class Monitor:
def __init__(self, args):
self.controls = {}
self.init_windows()
self.godname = args.god_name
self.dump_file = args.state
self.state = {}
self.notification_command = args.notification_command
self.quiet = args.quiet
self.browser = args.browser if args.browser else "x-www-browser"
self.refresh_command = args.refresh_command
self.autorefresh = args.autorefresh
self.open_browser_on_start = args.open_browser_on_start
self.token = args.token
self.rules = []
self.prev_state = None
self.error = None
curses.noecho()
try:
curses.cbreak()
except curses.error:
logging.error('curses error: cbreak returned ERR, probably invalid terminal. Try screen or tmux.')
pass
self.init_colors()
self.init_keys()
self.init_status_checkers()
def finalize(self):
curses.echo()
try:
curses.nocbreak()
except curses.error:
logging.error('curses error: nocbreak returned ERR, probably invalid terminal. Try screen or tmux.')
pass
curses.endwin()
def init_keys(self):
self.controls['q'] = self.quit
self.controls['f'] = self.open_browser
self.controls['F'] = self.refresh_session
self.controls[' '] = self.remove_warning
def init_windows(self):
self.stdscr = curses.initscr()
self.stdscr.clear()
self.stdscr.nodelay(True)
curses.start_color()
self.main_window = MainWindow(self.stdscr)
self.warning_windows = []
def init_colors(self):
curses.use_default_colors()
COLOR_TRANSPARENT = -1
curses.init_pair(Colors.STANDART,
curses.COLOR_WHITE,
COLOR_TRANSPARENT)
curses.init_pair(Colors.HEALTH_POINTS,
curses.COLOR_RED,
COLOR_TRANSPARENT)
curses.init_pair(Colors.POWER_POINTS,
curses.COLOR_BLUE,
COLOR_TRANSPARENT)
curses.init_pair(Colors.ATTENTION,
curses.COLOR_WHITE,
curses.COLOR_RED)
curses.init_pair(Colors.MONEY,
curses.COLOR_YELLOW,
COLOR_TRANSPARENT)
curses.init_pair(Colors.HEALING,
curses.COLOR_GREEN,
COLOR_TRANSPARENT)
def post_warning(self, warning_message):
if self.quiet:
return
if self.notification_command:
os.system(self.notification_command.format(warning_message)) # FIXME: Highly insecure!
self.warning_windows.append(WarningWindow(self.stdscr, warning_message))
def remove_warning(self):
if len(self.warning_windows) != 0:
del self.warning_windows[-1]
self.main_window.update(self.state)
def handle_expired_session(self):
if self.autorefresh:
if self.expired_on_start:
self.expired_on_start = False
if self.open_browser_on_start:
self.open_browser()
else:
self.refresh_session()
else:
self.refresh_session()
else:
self.post_warning(tr('Session is expired. Please reconnect.'))
def init_status_checkers(self):
self.rules.append(Rule(
lambda info: 'expired' in info and info['expired'],
self.handle_expired_session
))
for custom_rule in CUSTOM_RULES:
action = custom_rule(None)
if isinstance(action, str):  # Python 3: all text is str (unicode no longer exists)
# Trick to bind message text at the creation time, not call time.
action = lambda action=action: self.post_warning(action)
self.rules.append(Rule(custom_rule, action))
def read_state(self):
logging.debug('%s: reading state',
self.read_state.__name__)
state = None
try:
if self.dump_file != None:
state = self.read_dump(self.dump_file)
else:
state = load_hero_state(self.godname, self.token)
self.error = None
except urllib.error.URLError as e:
logging.error('%s: reading state error \n %s',
self.read_state.__name__,
str(e))
self.post_warning(tr('Connection error: {0}').format(e))
if self.prev_state is None:
print(tr('Error occurred, please see the pygod.log'))
sys.exit(1)
state = self.prev_state
self.error = str(e)
except Exception as e:
logging.error('%s: reading state error \n %s %s %s',
self.read_state.__name__,
str(type(e)), repr(e), str(e))
print(tr('Error occurred, please see the pygod.log'))
sys.exit(1)
if 'token_expired' in state:
self.post_warning(tr('Token is expired.\n'
'Visit user profile page to generate a new one:\n'
'https://godville.net/user/profile'
))
self.prev_state = state
return state
def read_dump(self, dumpfile):
state = None
try:
state = load_hero_state(self.godname, filename=dumpfile)
except IOError:
logging.error('%s: Error reading file %s',
self.read_dump.__name__,
dumpfile)
return state
def handle_key(self):
try:
key = self.stdscr.getkey()
if key in self.controls:
self.controls[key]()
except curses.error as e:
if not 'no input' in e.args:
raise
def quit(self):
sys.exit(0)
def open_browser(self):
subprocess.Popen("{0} http://godville.net/superhero".format(self.browser), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # FIXME also unsafe!
def refresh_session(self):
if self.refresh_command:
subprocess.Popen(self.refresh_command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # FIXME also unsafe!
def check_status(self, state):
for rule in self.rules:
rule.check(state)
def main_loop(self):
UPDATE_INTERVAL = 61
last_update_time = time.time()
self.state = self.read_state()
if self.error:
self.state['error'] = self.error
self.expired_on_start = 'expired' in self.state and self.state['expired']
self.check_status(self.state)
self.main_window.update(self.state)
while(True):
if last_update_time + UPDATE_INTERVAL < time.time():
last_update_time = time.time()
self.state = self.read_state()
self.check_status(self.state)
self.main_window.update(self.state)
if len(self.warning_windows) != 0:
self.warning_windows[-1].update({})
self.handle_key()
time.sleep(0.1)
def main():
# Parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument('god_name', nargs='?',
help = 'Name of the god to be monitored. Overrides value from config file.')
parser.add_argument('-c',
'--config',
type = str,
help = 'loads config file (default location is XDG_CONFIG_HOME/pygod/pygod.ini)')
parser.add_argument('-s',
'--state',
type = str,
help = 'read state from the dump file (debug option)')
parser.add_argument('-o',
'--open-browser',
action='store_true', dest='open_browser_on_start',
help = 'opens browser link on start instead of refresh command if session is expired')
parser.add_argument('-d',
'--dump',
action = 'store_true',
help = 'dump state to file and exit (debug option)')
parser.add_argument('-q',
'--quiet',
action = 'store_true',
default=False,
help = 'do not show notifications')
parser.add_argument('-D',
'--debug',
action = 'store_true',
help = 'enable debug logs')
args = parser.parse_args()
# Config.
config_files = [utils.get_config_file(), os.path.join(utils.get_data_dir(), "auth.cfg")]
if args.config:
config_files.append(args.config)
settings = configparser.ConfigParser()  # SafeConfigParser is deprecated and removed in recent Python 3
settings.read(config_files)
if args.god_name is None:
if 'auth' in settings and 'god_name' in settings['auth']:
args.god_name = utils.unquote_string(settings.get('auth', 'god_name'))
elif 'main' in settings and 'god_name' in settings['main']:
args.god_name = utils.unquote_string(settings.get('main', 'god_name'))
args.notification_command = None
if 'main' in settings and 'notification_command' in settings['main']:
args.notification_command = utils.unquote_string(settings.get('main', 'notification_command'))
args.browser = None
if 'main' in settings and 'browser' in settings['main']:
args.browser = utils.unquote_string(settings.get('main', 'browser'))
args.autorefresh = False
if 'main' in settings and 'autorefresh' in settings['main']:
args.autorefresh = utils.unquote_string(settings.get('main', 'autorefresh')).lower() == "true"
args.refresh_command = None
if 'main' in settings and 'refresh_command' in settings['main']:
args.refresh_command = utils.unquote_string(settings.get('main', 'refresh_command'))
args.token = None
if 'auth' in settings and 'token' in settings['auth']:
args.token = utils.unquote_string(settings.get('auth', 'token'))
# Configuring logs
log_level = logging.WARNING
if (args.debug):
log_level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
filename=os.path.join(utils.get_log_dir(), 'pygod.log'),
filemode='a+',
level=log_level)
if args.god_name is None:
print(tr('God name must be specified either via command line or using config file!'))
sys.exit(1)
logging.debug('Starting PyGod with username %s', args.god_name)
if args.dump:
state = load_hero_state(args.god_name, args.token, filename=args.state)
prettified_state = json.dumps(state, indent=4, ensure_ascii=False)
dump_file = '{0}.json'.format(args.god_name)
with open(dump_file, 'wb') as f:
f.write(prettified_state.encode('utf-8'))
print(tr('Dumped current state to {0}.'.format(dump_file)))
else:
try:
monitor = Monitor(args)
monitor.main_loop()
finally:
monitor.finalize()
if __name__ == '__main__':
main()
|
aserebryakov/godville-monitor-console
|
pygod.py
|
Python
|
gpl-2.0
| 14,394
|
[
"VisIt"
] |
80bf59c919ed9c8a6ca270bd3d59584149fb7c8ef70d49ef0d38c765e9289c11
|
import numpy as np
import pandas as pd
from statsmodels.sandbox.stats.multicomp import multipletests
import regreg.api as rr
from ...api import (randomization,
glm_group_lasso,
multiple_queries)
from ...tests.instance import (gaussian_instance,
logistic_instance)
from ...tests.flags import SMALL_SAMPLES, SET_SEED
from ...tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue)
from ..query import naive_confidence_intervals, naive_pvalues
from ..M_estimator import restricted_Mest
from ..cv_view import CV_view
from ..glm import (glm_nonparametric_bootstrap,
pairs_bootstrap_glm)
if SMALL_SAMPLES:
nboot = 10
else:
nboot = -1
@set_seed_iftrue(SET_SEED)
@set_sampling_params_iftrue(SMALL_SAMPLES, burnin=10, ndraw=10)
@wait_for_return_value()
def test_cv(n=100, p=50, s=5, signal=7.5, K=5, rho=0.,
randomizer = 'gaussian',
randomizer_scale = 1.,
scale1 = 0.1,
scale2 = 0.2,
lam_frac = 1.,
glmnet = True,
loss = 'gaussian',
bootstrap = False,
condition_on_CVR = True,
marginalize_subgrad = True,
ndraw = 10000,
burnin = 2000,
nboot = nboot):
print(n,p,s, condition_on_CVR, scale1, scale2)
if randomizer == 'laplace':
randomizer = randomization.laplace((p,), scale=randomizer_scale)
elif randomizer == 'gaussian':
randomizer = randomization.isotropic_gaussian((p,),randomizer_scale)
elif randomizer == 'logistic':
randomizer = randomization.logistic((p,), scale=randomizer_scale)
if loss == "gaussian":
X, y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1)
glm_loss = rr.glm.gaussian(X, y)
elif loss == "logistic":
X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
glm_loss = rr.glm.logistic(X, y)
epsilon = 1./np.sqrt(n)
# view 1
cv = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
if glmnet:
try:
cv.solve(glmnet=glmnet)
except ImportError:
cv.solve(glmnet=False)
else:
cv.solve(glmnet=False)
# for the test make sure we also run the python code
cv_py = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
cv_py.solve(glmnet=False)
lam = cv.lam_CVR
print("lam", lam)
if condition_on_CVR:
cv.condition_on_opt_state()
lam = cv.one_SD_rule(direction="up")
print("new lam", lam)
# non-randomized Lasso, just looking how many vars it selects
problem = rr.simple_problem(glm_loss, rr.l1norm(p, lagrange=lam))
beta_hat = problem.solve()
active_hat = beta_hat !=0
print("non-randomized lasso ", active_hat.sum())
# view 2
W = lam_frac * np.ones(p) * lam
penalty = rr.group_lasso(np.arange(p),
weights=dict(zip(np.arange(p), W)), lagrange=1.)
M_est = glm_group_lasso(glm_loss, epsilon, penalty, randomizer)
if nboot > 0:
cv.nboot = M_est.nboot = nboot
mv = multiple_queries([cv, M_est])
mv.solve()
active_union = M_est._overall
nactive = np.sum(active_union)
print("nactive", nactive)
if nactive==0:
return None
nonzero = np.where(beta)[0]
if set(nonzero).issubset(np.nonzero(active_union)[0]):
active_set = np.nonzero(active_union)[0]
true_vec = beta[active_union]
if marginalize_subgrad == True:
M_est.decompose_subgradient(conditioning_groups=np.zeros(p, bool),
marginalizing_groups=np.ones(p, bool))
selected_features = np.zeros(p, bool)
selected_features[active_set] = True
unpenalized_mle = restricted_Mest(M_est.loss, selected_features)
form_covariances = glm_nonparametric_bootstrap(n, n)
target_info, target_observed = pairs_bootstrap_glm(M_est.loss, selected_features, inactive=None)
cov_info = M_est.setup_sampler()
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info],
nsample=M_est.nboot)
opt_sample = M_est.sampler.sample(ndraw,
burnin)
pvalues = M_est.sampler.coefficient_pvalues(unpenalized_mle,
target_cov,
score_cov,
parameter=np.zeros(selected_features.sum()),
sample=opt_sample)
intervals = M_est.sampler.confidence_intervals(unpenalized_mle, target_cov, score_cov, sample=opt_sample)
L, U = intervals.T
sel_covered = np.zeros(nactive, bool)
sel_length = np.zeros(nactive)
LU_naive = naive_confidence_intervals(np.diag(target_cov), target_observed)
naive_covered = np.zeros(nactive, bool)
naive_length = np.zeros(nactive)
naive_pvals = naive_pvalues(np.diag(target_cov), target_observed, true_vec)
active_var = np.zeros(nactive, bool)
for j in range(nactive):
if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
sel_covered[j] = 1
if (LU_naive[j, 0] <= true_vec[j]) and (LU_naive[j, 1] >= true_vec[j]):
naive_covered[j] = 1
sel_length[j] = U[j]-L[j]
naive_length[j] = LU_naive[j,1]-LU_naive[j,0]
active_var[j] = active_set[j] in nonzero
q = 0.2
BH_decisions = multipletests(pvalues, alpha=q, method="fdr_bh")[0]
return sel_covered, sel_length, naive_pvals, naive_covered, naive_length, active_var, BH_decisions, active_var
|
selective-inference/selective-inference
|
selectinf/randomized/tests/sandbox/test_cv.py
|
Python
|
bsd-3-clause
| 6,370
|
[
"Gaussian"
] |
bd37fcf32b60f737b5af2fc63c2d9bd367a12daa3b39f671e28b2f596db0ba17
|
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import time
import warnings
import numpy as np
from scipy.constants import speed_of_light
from scipy.stats import cauchy
from astropy.table import Table,Column
import astropy.io.fits as pyfits
import multiprocessing
import healpy
from desiutil.log import get_logger
from desispec.io.util import write_bintable
from desispec.io.fibermap import read_fibermap
from desisim.simexp import reference_conditions
from desisim.templates import SIMQSO, QSO
from desisim.scripts.quickspectra import sim_spectra
from desisim.lya_spectra import read_lya_skewers , apply_lya_transmission, apply_metals_transmission, lambda_RF_LYA
from desisim.dla import dla_spec,insert_dlas
from desisim.bal import BAL
from desisim.io import empty_metatable
from desisim.eboss import FootprintEBOSS, sdss_subsample, RedshiftDistributionEBOSS, sdss_subsample_redshift
from desispec.interpolation import resample_flux
from desimodel.io import load_pixweight
from desimodel import footprint
from speclite import filters
from desitarget.cuts import isQSO_colors
from desiutil.dust import SFDMap, ext_odonnell
try:
c = speed_of_light/1000. #- km/s
except TypeError:
#
# This can happen in documentation builds.
#
c = 299792458.0/1000.0
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Fast simulation of QSO Lya spectra into the final DESI format\
(Spectra class) that can be directly used as an input to the redshift fitter\
(redrock) or correlation function code (picca). The input file is a Lya\
transmission skewer fits file whose format is described in\
https://desi.lbl.gov/trac/wiki/LymanAlphaWG/LyaSpecSim.")
#- Required
parser.add_argument('-i','--infile', type=str, nargs= "*", required=True, help="Input skewer healpix fits file(s)")
parser.add_argument('-o','--outfile', type=str, required=False, help="Output spectra (only used if single input file)")
parser.add_argument('--outdir', type=str, default=".", required=False, help="Output directory")
#- Optional observing conditions to override program defaults
parser.add_argument('--program', type=str, default="DARK", help="Program (DARK, GRAY or BRIGHT)")
parser.add_argument('--seeing', type=float, default=None, help="Seeing FWHM [arcsec]")
parser.add_argument('--airmass', type=float, default=None, help="Airmass")
parser.add_argument('--exptime', type=float, default=None, help="Exposure time [sec]")
parser.add_argument('--moonfrac', type=float, default=None, help="Moon illumination fraction; 1=full")
parser.add_argument('--moonalt', type=float, default=None, help="Moon altitude [degrees]")
parser.add_argument('--moonsep', type=float, default=None, help="Moon separation to tile [degrees]")
parser.add_argument('--seed', type=int, default=None, required = False, help="Global random seed (will be used to generate a seed for each input file)")
parser.add_argument('--skyerr', type=float, default=0.0, help="Fractional sky subtraction error")
parser.add_argument('--downsampling', type=float, default=None,help="fractional random down-sampling (value between 0 and 1)")
parser.add_argument('--zmin', type=float, default=0, help="Min redshift")
parser.add_argument('--zmax', type=float, default=10, help="Max redshift")
parser.add_argument('--wmin', type=float, default=3500, help="Min wavelength (obs. frame)")
parser.add_argument('--wmax', type=float, default=10000, help="Max wavelength (obs. frame)")
parser.add_argument('--dwave', type=float, default=0.2, help="Internal wavelength step (don't change this)")
parser.add_argument('--dwave_desi', type=float, default=0.8, help="Output wavelength step for DESI mocks")
parser.add_argument('--zbest', action = "store_true", help="add a zbest file per spectrum either with the truth\
redshift or adding some error (optionally use it with --sigma_kms_fog and/or --gamma_kms_zfit)")
parser.add_argument('--sigma_kms_fog',type=float,default=150, help="Adds a Gaussian error to the quasar \
redshift that simulates the fingers-of-god effect")
parser.add_argument('--gamma_kms_zfit',nargs='?',type=float,const=400,help="Adds a Lorentzian distributed shift\
to the quasar redshift, to simulate the redshift fitting step. E.g. --gamma_kms_zfit 400 will use a gamma \
parameter of 400 km/s . If a number is not specified, a value of 400 is used.")
parser.add_argument('--shift_kms_los',type=float,default=0,help="Adds a shift to the quasar redshift written in\
the zbest file (in km/s)")
parser.add_argument('--target-selection', action = "store_true" ,help="apply QSO target selection cuts to the simulated quasars")
parser.add_argument('--mags', action = "store_true", help="DEPRECATED; use --bbflux")
parser.add_argument('--bbflux', action = "store_true", help="compute and write the QSO broad-band fluxes in the fibermap")
parser.add_argument('--add-LYB', action='store_true', help = "Add LYB absorption from transmision file")
parser.add_argument('--metals', type=str, default=None, required=False, help = "list of metals to get the\
transmission from, if 'all' runs on all metals", nargs='*')
#parser.add_argument('--metals-from-file', action = 'store_true', help = "add metals from HDU in file")
parser.add_argument('--metals-from-file',type=str,const='all',help = "list of metals,'SI1260,SI1207' etc, to get from HDUs in file. \
Use 'all' or no argument for mock version < 7.3 or final metal runs. ",nargs='?')
parser.add_argument('--dla',type=str,required=False, help="Add DLAs to simulated spectra either randomly\
(--dla random) or from transmission file (--dla file)")
parser.add_argument('--balprob',type=float,required=False, help="To add BAL features with the specified probability\
(e.g. --balprob 0.5). Expects a number between 0 and 1 ")
parser.add_argument('--no-simqso',action = "store_true", help="Does not use desisim.templates.SIMQSO\
to generate templates, and uses desisim.templates.QSO instead.")
parser.add_argument('--save-continuum',action = "store_true", help="Save true continuum to file")
parser.add_argument('--save-continuum-dwave',type=float, default=2, help="Delta wavelength to save true continuum")
parser.add_argument('--desi-footprint', action = "store_true" ,help="select QSOs in DESI footprint")
parser.add_argument('--eboss',action = 'store_true', help='Setup footprint, number density, redshift distribution,\
and exposure time to generate eBOSS-like mocks')
parser.add_argument('--extinction',action='store_true',help='Adds Galactic extinction')
parser.add_argument('--no-transmission',action = 'store_true', help='Do not multiply continuum\
by transmission, use F=1 everywhere')
parser.add_argument('--nproc', type=int, default=1,help="number of processors to run faster")
parser.add_argument('--overwrite', action = "store_true" ,help="rerun if spectra exists (default is skip)")
parser.add_argument('--nmax', type=int, default=None, help="Max number of QSO per input file, for debugging")
parser.add_argument('--save-resolution',action='store_true', help="Save full resolution in spectra file. By default only one matrix is saved in the truth file.")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def mod_cauchy(loc,scale,size,cut):
samples=cauchy.rvs(loc=loc,scale=scale,size=3*size)
samples=samples[abs(samples)<cut]
if len(samples)>=size:
samples=samples[:size]
else:
samples=mod_cauchy(loc,scale,size,cut) ##Only added for the very unlikely case that there are not enough samples after the cut.
return samples
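# Usage sketch (values are the module defaults used further below, see
# DZ_stat_shift in simulate_one_healpix): draw nqso redshift-fit errors from a
# Lorentzian of width gamma_kms_zfit, truncated at +-3000 km/s, e.g.
# dv = mod_cauchy(loc=0, scale=400, size=1000, cut=3000)  # km/s
# dz = dv / c * (1 + z_input)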
def get_spectra_filename(args,nside,pixel):
if args.outfile :
return args.outfile
filename="{}/{}/spectra-{}-{}.fits".format(pixel//100,pixel,nside,pixel)
return os.path.join(args.outdir,filename)
def get_zbest_filename(args,pixdir,nside,pixel):
if args.zbest :
return os.path.join(pixdir,"zbest-{}-{}.fits".format(nside,pixel))
return None
def get_truth_filename(args,pixdir,nside,pixel):
return os.path.join(pixdir,"truth-{}-{}.fits".format(nside,pixel))
def is_south(dec):
"""Identify which QSOs are in the south vs the north, since these are on
different photometric systems. See
https://github.com/desihub/desitarget/issues/353 for details.
"""
return dec <= 32.125 # constant-declination cut!
def get_healpix_info(ifilename):
"""Read the header of the tranmission file to find the healpix pixel, nside
and if we are lucky the scheme. If it fails, try to guess it from the
filename (for backward compatibility).
Args:
ifilename: full path to input transmission file
Returns:
healpix: HEALPix pixel corresponding to the file
nside: HEALPix nside value
hpxnest: Whether HEALPix scheme in the file was nested
"""
log = get_logger()
print('ifilename',ifilename)
healpix=-1
nside=-1
hpxnest=True
hdulist=pyfits.open(ifilename)
if "METADATA" in hdulist :
head=hdulist["METADATA"].header
for k in ["HPXPIXEL","PIXNUM"] :
if k in head :
healpix=int(head[k])
log.info("healpix={}={}".format(k,healpix))
break
for k in ["HPXNSIDE","NSIDE"] :
if k in head :
nside=int(head[k])
log.info("nside={}={}".format(k,nside))
break
for k in ["HPXNEST","NESTED","SCHEME"] :
if k in head :
if k == "SCHEME" :
hpxnest=(head[k]=="NEST")
else :
hpxnest=bool(head[k])
log.info("hpxnest from {} = {}".format(k,hpxnest))
break
hdulist.close()
if healpix >= 0 and nside < 0 :
log.error("Read healpix in header but not nside.")
raise ValueError("Read healpix in header but not nside.")
if healpix < 0 :
vals = os.path.basename(ifilename).split(".")[0].split("-")
if len(vals)<3 :
error_msg="Could not guess healpix info from {}".format(ifilename)
log.error(error_msg)
raise ValueError(error_msg)
try :
healpix=int(vals[-1])
nside=int(vals[-2])
except ValueError:
error_msg="Could not guess healpix info from {}".format(ifilename)
log.error(error_msg)
raise ValueError(error_msg)
log.warning("Guessed healpix and nside from filename, assuming the healpix scheme is 'NESTED'")
return healpix, nside, hpxnest
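# Example of the filename fallback above (hypothetical file name): for
# "transmission-16-123.fits" the split yields ['transmission', '16', '123'],
# so nside=16 and healpix=123, with the scheme assumed to be NESTED.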
def get_pixel_seed(pixel, nside, global_seed):
if global_seed is None:
# return a random seed
return np.random.randint(2**32, size=1)[0]
npix=healpy.nside2npix(nside)
np.random.seed(global_seed)
seeds = np.unique(np.random.randint(2**32, size=10*npix))[:npix]
pixel_seed = seeds[pixel]
return pixel_seed
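# Note on reproducibility (added for clarity): because the same global seed is
# fed to np.random.seed before drawing the per-pixel seed table, the mapping
# (global_seed, nside, pixel) -> pixel_seed is deterministic, so re-running a
# single healpix pixel reproduces its spectra; with global_seed=None every run
# gets an independent random seed instead.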
def simulate_one_healpix(ifilename,args,model,obsconditions,decam_and_wise_filters,
bassmzls_and_wise_filters,footprint_healpix_weight,
footprint_healpix_nside,
bal=None,sfdmap=None,eboss=None) :
log = get_logger()
# open filename and extract basic HEALPix information
pixel, nside, hpxnest = get_healpix_info(ifilename)
# using global seed (could be None) get seed for this particular pixel
global_seed = args.seed
seed = get_pixel_seed(pixel, nside, global_seed)
# use this seed to generate future random numbers
np.random.seed(seed)
# get output file (we will write there spectra for this HEALPix pixel)
ofilename = get_spectra_filename(args,nside,pixel)
# get directory name (we will also write there zbest file)
pixdir = os.path.dirname(ofilename)
# get filename for truth file
truth_filename = get_truth_filename(args,pixdir,nside,pixel)
# get filename for zbest file
zbest_filename = get_zbest_filename(args,pixdir,nside,pixel)
if not args.overwrite :
# check whether output exists or not
if args.zbest :
if os.path.isfile(ofilename) and os.path.isfile(zbest_filename) :
log.info("skip existing {} and {}".format(ofilename,zbest_filename))
return
else : # only test spectra file
if os.path.isfile(ofilename) :
log.info("skip existing {}".format(ofilename))
return
# create sub-directories if required
if len(pixdir)>0 :
if not os.path.isdir(pixdir) :
log.info("Creating dir {}".format(pixdir))
os.makedirs(pixdir)
if not eboss is None:
dwave_out = None
else:
dwave_out = args.dwave_desi
log.info("Read skewers in {}, random seed = {}".format(ifilename,seed))
# Read transmission from files. It might include DLA information, and it
# might add metal transmission as well (from the HDU file).
log.info("Read transmission file {}".format(ifilename))
trans_wave, transmission, metadata, dla_info = read_lya_skewers(ifilename,read_dlas=(args.dla=='file'),add_metals=args.metals_from_file,add_lyb=args.add_LYB)
### Add Finger-of-God, before generate the continua
log.info("Add FOG to redshift with sigma {} to quasar redshift".format(args.sigma_kms_fog))
DZ_FOG = args.sigma_kms_fog/c*(1.+metadata['Z'])*np.random.normal(0,1,metadata['Z'].size)
metadata['Z'] += DZ_FOG
### Select quasar within a given redshift range
w = (metadata['Z']>=args.zmin) & (metadata['Z']<=args.zmax)
transmission = transmission[w]
metadata = metadata[:][w]
DZ_FOG = DZ_FOG[w]
# option to make for BOSS+eBOSS
if not eboss is None:
if args.downsampling or args.desi_footprint:
raise ValueError("eboss option can not be run with "
+"desi_footprint or downsampling")
# Get the redshift distribution from SDSS
selection = sdss_subsample_redshift(metadata["RA"],metadata["DEC"],metadata['Z'],eboss['redshift'])
log.info("Select QSOs in BOSS+eBOSS redshift distribution {} -> {}".format(metadata['Z'].size,selection.sum()))
if selection.sum()==0:
log.warning("No intersection with BOSS+eBOSS redshift distribution")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
# figure out the density of all quasars
N_highz = metadata['Z'].size
# area of healpix pixel, in degrees
area_deg2 = healpy.pixelfunc.nside2pixarea(nside,degrees=True)
input_highz_dens_deg2 = N_highz/area_deg2
selection = sdss_subsample(metadata["RA"], metadata["DEC"],
input_highz_dens_deg2,eboss['footprint'])
log.info("Select QSOs in BOSS+eBOSS footprint {} -> {}".format(transmission.shape[0],selection.size))
if selection.size == 0 :
log.warning("No intersection with BOSS+eBOSS footprint")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
if args.desi_footprint :
footprint_healpix = footprint.radec2pix(footprint_healpix_nside, metadata["RA"], metadata["DEC"])
selection = np.where(footprint_healpix_weight[footprint_healpix]>0.99)[0]
log.info("Select QSOs in DESI footprint {} -> {}".format(transmission.shape[0],selection.size))
if selection.size == 0 :
log.warning("No intersection with DESI footprint")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
nqso=transmission.shape[0]
if args.downsampling is not None :
if args.downsampling <= 0 or args.downsampling > 1 :
log.error("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
raise ValueError("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
indices = np.where(np.random.uniform(size=nqso)<args.downsampling)[0]
if indices.size == 0 :
log.warning("Down sampling from {} to 0 (by chance I presume)".format(nqso))
return
transmission = transmission[indices]
metadata = metadata[:][indices]
DZ_FOG = DZ_FOG[indices]
nqso = transmission.shape[0]
if args.nmax is not None :
if args.nmax < nqso :
log.info("Limit number of QSOs from {} to nmax={} (random subsample)".format(nqso,args.nmax))
# take a random subsample
indices = np.random.choice(np.arange(nqso),args.nmax,replace=False)
transmission = transmission[indices]
metadata = metadata[:][indices]
DZ_FOG = DZ_FOG[indices]
nqso = args.nmax
# In previous versions of the London mocks we needed to enforce F=1 for
# z > z_qso here, but this is not needed anymore. Moreover, now we also
# have metal absorption that implies F < 1 for z > z_qso
#for ii in range(len(metadata)):
# transmission[ii][trans_wave>lambda_RF_LYA*(metadata[ii]['Z']+1)]=1.0
# if requested, add DLA to the transmission skewers
if args.dla is not None :
# if adding random DLAs, we will need a new random generator
if args.dla=='random':
log.info('Adding DLAs randomly')
random_state_just_for_dlas = np.random.RandomState(seed)
elif args.dla=='file':
log.info('Adding DLAs from transmission file')
else:
log.error("Wrong option for args.dla: "+args.dla)
sys.exit(1)
# if adding DLAs, the information will be printed here
dla_filename=os.path.join(pixdir,"dla-{}-{}.fits".format(nside,pixel))
dla_NHI, dla_z, dla_qid,dla_id = [], [], [],[]
# identify minimum Lya redshift in transmission files
min_lya_z = np.min(trans_wave/lambda_RF_LYA - 1)
# loop over quasars in pixel
for ii in range(len(metadata)):
# quasars with z < min_z will not have any DLA in spectrum
if min_lya_z>metadata['Z'][ii]: continue
# quasar ID
idd=metadata['MOCKID'][ii]
dlas=[]
if args.dla=='file':
for dla in dla_info[dla_info['MOCKID']==idd]:
# Adding only DLAs with z < zqso
if dla['Z_DLA_RSD']>=metadata['Z'][ii]: continue
dlas.append(dict(z=dla['Z_DLA_RSD'],N=dla['N_HI_DLA'],dlaid=dla['DLAID']))
transmission_dla = dla_spec(trans_wave,dlas)
elif args.dla=='random':
dlas, transmission_dla = insert_dlas(trans_wave, metadata['Z'][ii], rstate=random_state_just_for_dlas)
for idla in dlas:
idla['dlaid']+=idd*1000 #Added to have unique DLA ids. Same format as DLAs from file.
# multiply transmissions and store information for the DLA file
if len(dlas)>0:
transmission[ii] = transmission_dla * transmission[ii]
dla_z += [idla['z'] for idla in dlas]
dla_NHI += [idla['N'] for idla in dlas]
dla_id += [idla['dlaid'] for idla in dlas]
dla_qid += [idd]*len(dlas)
log.info('Added {} DLAs'.format(len(dla_id)))
# write file with DLA information
if len(dla_id)>0:
dla_meta=Table()
dla_meta['NHI'] = dla_NHI
dla_meta['Z_DLA'] = dla_z #This is Z_DLA_RSD in the transmission file.
dla_meta['TARGETID']=dla_qid
dla_meta['DLAID'] = dla_id
hdu_dla = pyfits.convenience.table_to_hdu(dla_meta)
hdu_dla.name="DLA_META"
del(dla_meta)
log.info("DLA metadata to be saved in {}".format(truth_filename))
else:
hdu_dla=pyfits.PrimaryHDU()
hdu_dla.name="DLA_META"
# if requested, extend transmission skewers to cover full spectrum
if args.target_selection or args.bbflux :
wanted_min_wave = 3329. # needed to compute magnitudes for decam2014-r (one could have trimmed the transmission file ...)
wanted_max_wave = 55501. # needed to compute magnitudes for wise2010-W2
if trans_wave[0]>wanted_min_wave :
log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(wanted_min_wave),int(trans_wave[-1])))
# pad with ones at short wavelength, we assume F = 1 for z <~ 1.7
# we don't need any wavelength resolution here
new_trans_wave = np.append([wanted_min_wave,trans_wave[0]-0.01],trans_wave)
new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
new_transmission[:,2:] = transmission
trans_wave = new_trans_wave
transmission = new_transmission
if trans_wave[-1]<wanted_max_wave :
log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(trans_wave[0]),int(wanted_max_wave)))
# pad with ones at long wavelength because we assume F = 1
coarse_dwave = 2. # we don't care about resolution, we just need a decent QSO spectrum, there is no IGM transmission in this range
n = int((wanted_max_wave-trans_wave[-1])/coarse_dwave)+1
new_trans_wave = np.append(trans_wave,np.linspace(trans_wave[-1]+coarse_dwave,trans_wave[-1]+coarse_dwave*(n+1),n))
new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
new_transmission[:,:trans_wave.size] = transmission
trans_wave = new_trans_wave
transmission = new_transmission
# whether to use QSO or SIMQSO to generate quasar continua. Simulate
# spectra in the north vs south separately because they're on different
# photometric systems.
south = np.where( is_south(metadata['DEC']) )[0]
north = np.where( ~is_south(metadata['DEC']) )[0]
meta, qsometa = empty_metatable(nqso, objtype='QSO', simqso=not args.no_simqso)
if args.no_simqso:
log.info("Simulate {} QSOs with QSO templates".format(nqso))
tmp_qso_flux = np.zeros([nqso, len(model.eigenwave)], dtype='f4')
tmp_qso_wave = np.zeros_like(tmp_qso_flux)
else:
log.info("Simulate {} QSOs with SIMQSO templates".format(nqso))
tmp_qso_flux = np.zeros([nqso, len(model.basewave)], dtype='f4')
tmp_qso_wave = model.basewave
for these, issouth in zip( (north, south), (False, True) ):
# number of quasars in these
nt = len(these)
if nt<=0: continue
if not eboss is None:
# for eBOSS, generate only quasars with r<22
magrange = (17.0, 21.3)
_tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
= model.make_templates(nmodel=nt,
redshift=metadata['Z'][these], magrange=magrange,
lyaforest=False, nocolorcuts=True,
noresample=True, seed=seed, south=issouth)
else:
_tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
= model.make_templates(nmodel=nt,
redshift=metadata['Z'][these],
lyaforest=False, nocolorcuts=True,
noresample=True, seed=seed, south=issouth)
_meta['TARGETID'] = metadata['MOCKID'][these]
_qsometa['TARGETID'] = metadata['MOCKID'][these]
meta[these] = _meta
qsometa[these] = _qsometa
tmp_qso_flux[these, :] = _tmp_qso_flux
if args.no_simqso:
tmp_qso_wave[these, :] = _tmp_qso_wave
log.info("Resample to transmission wavelength grid")
qso_flux=np.zeros((tmp_qso_flux.shape[0],trans_wave.size))
if args.no_simqso:
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=np.interp(trans_wave,tmp_qso_wave[q],tmp_qso_flux[q])
else:
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=np.interp(trans_wave,tmp_qso_wave,tmp_qso_flux[q])
tmp_qso_flux = qso_flux
tmp_qso_wave = trans_wave
if args.save_continuum :
true_wave=np.linspace(args.wmin,args.wmax,int((args.wmax-args.wmin)/args.save_continuum_dwave)+1)
true_flux=np.zeros((tmp_qso_flux.shape[0],true_wave.size))
for q in range(tmp_qso_flux.shape[0]) :
true_flux[q]=resample_flux(true_wave,tmp_qso_wave,tmp_qso_flux[q])
continum_meta=Table()
continum_meta['TARGETID'] = qsometa['TARGETID']
continum_meta['TRUE_CONT'] = true_flux
hdu_trueCont = pyfits.convenience.table_to_hdu(continum_meta)
hdu_trueCont.name = "TRUE_CONT"
hdu_trueCont.header['wmin'] = args.wmin
hdu_trueCont.header['wmax'] = args.wmax
hdu_trueCont.header['dwave'] = args.save_continuum_dwave
del(continum_meta,true_wave,true_flux)
log.info("True continum to be saved in {}".format(truth_filename))
# if requested, add BAL features to the quasar continua
if args.balprob:
if args.balprob <= 1. and args.balprob > 0:
from desisim.io import find_basis_template
log.info("Adding BALs with probability {}".format(args.balprob))
# save current random state
rnd_state = np.random.get_state()
tmp_qso_flux,meta_bal = bal.insert_bals(tmp_qso_wave, tmp_qso_flux, metadata['Z'],
balprob= args.balprob, seed=seed, qsoid=metadata['MOCKID'])
# restore random state to get the same random numbers later
# as when we don't insert BALs
np.random.set_state(rnd_state)
w = np.in1d(qsometa['TARGETID'], meta_bal['TARGETID'])
qsometa['BAL_TEMPLATEID'][w] = meta_bal['BAL_TEMPLATEID']
hdu_bal=pyfits.convenience.table_to_hdu(meta_bal); hdu_bal.name="BAL_META"
#Trim to only show the version, assuming it is located in os.environ['DESI_BASIS_TEMPLATES']
hdu_bal.header["BALTEMPL"]=find_basis_template(objtype='BAL').split('basis_templates/')[1]
del meta_bal
else:
balstr=str(args.balprob)
log.error("BAL probability is not between 0 and 1 : "+balstr)
sys.exit(1)
# Multiply quasar continua by transmitted flux fraction
# (at this point transmission file might include Ly-beta, metals and DLAs)
log.info("Apply transmitted flux fraction")
if not args.no_transmission:
tmp_qso_flux = apply_lya_transmission(tmp_qso_wave,tmp_qso_flux,
trans_wave,transmission)
# if requested, compute metal transmission on the fly
# (if not included already from the transmission file)
if args.metals is not None:
if args.metals_from_file :
log.error('you cannot add metals twice')
raise ValueError('you cannot add metals twice')
if args.no_transmission:
log.error('you cannot add metals if asking for no-transmission')
raise ValueError('can not add metals if using no-transmission')
lstMetals = ''
for m in args.metals: lstMetals += m+', '
log.info("Apply metals: {}".format(lstMetals[:-2]))
tmp_qso_flux = apply_metals_transmission(tmp_qso_wave,tmp_qso_flux,
trans_wave,transmission,args.metals)
# if requested, compute magnitudes and apply target selection. Need to do
# this calculation separately for QSOs in the north vs south.
bbflux=None
if args.target_selection or args.bbflux :
bands=['FLUX_G','FLUX_R','FLUX_Z', 'FLUX_W1', 'FLUX_W2']
bbflux=dict()
bbflux['SOUTH'] = is_south(metadata['DEC'])
for band in bands:
bbflux[band] = np.zeros(nqso)
# need to recompute the magnitudes to account for lya transmission
log.info("Compute QSO magnitudes")
for these, filters in zip( (~bbflux['SOUTH'], bbflux['SOUTH']),
(bassmzls_and_wise_filters, decam_and_wise_filters) ):
if np.count_nonzero(these) > 0:
maggies = filters.get_ab_maggies(1e-17 * tmp_qso_flux[these, :], tmp_qso_wave)
for band, filt in zip( bands, maggies.colnames ):
bbflux[band][these] = np.ma.getdata(1e9 * maggies[filt]) # nanomaggies
if args.target_selection :
log.info("Apply target selection")
isqso = np.ones(nqso, dtype=bool)
for these, issouth in zip( (~bbflux['SOUTH'], bbflux['SOUTH']), (False, True) ):
if np.count_nonzero(these) > 0:
# optical cuts only if using QSO vs SIMQSO
isqso[these] &= isQSO_colors(gflux=bbflux['FLUX_G'][these],
rflux=bbflux['FLUX_R'][these],
zflux=bbflux['FLUX_Z'][these],
w1flux=bbflux['FLUX_W1'][these],
w2flux=bbflux['FLUX_W2'][these],
south=issouth, optical=args.no_simqso)
log.info("Target selection: {}/{} QSOs selected".format(np.sum(isqso),nqso))
selection=np.where(isqso)[0]
if selection.size==0 : return
tmp_qso_flux = tmp_qso_flux[selection]
metadata = metadata[:][selection]
meta = meta[:][selection]
qsometa = qsometa[:][selection]
DZ_FOG = DZ_FOG[selection]
for band in bands :
bbflux[band] = bbflux[band][selection]
bbflux['SOUTH']=bbflux['SOUTH'][selection]
nqso = selection.size
log.info("Resample to a linear wavelength grid (needed by DESI sim.)")
# careful integration of bins, not just a simple interpolation
qso_wave=np.linspace(args.wmin,args.wmax,int((args.wmax-args.wmin)/args.dwave)+1)
qso_flux=np.zeros((tmp_qso_flux.shape[0],qso_wave.size))
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=resample_flux(qso_wave,tmp_qso_wave,tmp_qso_flux[q])
log.info("Simulate DESI observation and write output file")
if "MOCKID" in metadata.dtype.names :
#log.warning("Using MOCKID as TARGETID")
targetid=np.array(metadata["MOCKID"]).astype(int)
elif "ID" in metadata.dtype.names :
log.warning("Using ID as TARGETID")
targetid=np.array(metadata["ID"]).astype(int)
else :
log.warning("No TARGETID")
targetid=None
specmeta={"HPXNSIDE":nside,"HPXPIXEL":pixel, "HPXNEST":hpxnest}
if args.target_selection or args.bbflux :
fibermap_columns = dict(
FLUX_G = bbflux['FLUX_G'],
FLUX_R = bbflux['FLUX_R'],
FLUX_Z = bbflux['FLUX_Z'],
FLUX_W1 = bbflux['FLUX_W1'],
FLUX_W2 = bbflux['FLUX_W2'],
)
photsys = np.full(len(bbflux['FLUX_G']), 'N', dtype='S1')
photsys[bbflux['SOUTH']] = b'S'
fibermap_columns['PHOTSYS'] = photsys
else :
fibermap_columns=None
# Attenuate the spectra for extinction
if not sfdmap is None:
Rv=3.1 #set by default
indx=np.arange(metadata['RA'].size)
extinction =Rv*ext_odonnell(qso_wave)
EBV = sfdmap.ebv(metadata['RA'],metadata['DEC'], scaling=1.0)
qso_flux *=10**( -0.4 * EBV[indx, np.newaxis] * extinction)
if fibermap_columns is not None:
fibermap_columns['EBV']=EBV
EBV0=0.0
EBV_med=np.median(EBV)
Ag = 3.303 * (EBV_med - EBV0)
exptime_fact=np.power(10.0, (2.0 * Ag / 2.5))
obsconditions['EXPTIME']*=exptime_fact
log.info("Dust extinction added")
log.info('exposure time adjusted to {}'.format(obsconditions['EXPTIME']))
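# Worked example of the exposure-time correction above (hypothetical median
# extinction, added for clarity): for a median E(B-V) of 0.03 mag,
# Ag = 3.303 * 0.03 ~ 0.099 mag and the exposure time is scaled by
# 10**(2*0.099/2.5) ~ 1.20.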
if args.eboss:
specsim_config_file = 'eboss'
else:
specsim_config_file = 'desi'
### use Poisson = False to get reproducible results.
### use args.save_resolution = False to not save the matrix resolution per quasar in spectra files.
resolution=sim_spectra(qso_wave,qso_flux, args.program, obsconditions=obsconditions,spectra_filename=ofilename,
sourcetype="qso", skyerr=args.skyerr,ra=metadata["RA"],dec=metadata["DEC"],targetid=targetid,
meta=specmeta,seed=seed,fibermap_columns=fibermap_columns,use_poisson=False,
specsim_config_file=specsim_config_file, dwave_out=dwave_out, save_resolution=args.save_resolution)
### Keep input redshift
Z_spec = metadata['Z'].copy()
Z_input = metadata['Z'].copy()-DZ_FOG
### Add a shift to the redshift, simulating the systematic imprecision of redrock
DZ_sys_shift = args.shift_kms_los/c*(1.+Z_input)
log.info('Added a shift of {} km/s to the redshift'.format(args.shift_kms_los))
meta['REDSHIFT'] += DZ_sys_shift
metadata['Z'] += DZ_sys_shift
### Add a shift to the redshift, simulating the statistical imprecision of redrock
if args.gamma_kms_zfit:
log.info("Added zfit error with gamma {} to zbest".format(args.gamma_kms_zfit))
DZ_stat_shift = mod_cauchy(loc=0,scale=args.gamma_kms_zfit,size=nqso,cut=3000)/c*(1.+Z_input)
meta['REDSHIFT'] += DZ_stat_shift
metadata['Z'] += DZ_stat_shift
## Write the truth file, including metadata for DLAs and BALs
log.info('Writing a truth file {}'.format(truth_filename))
meta.rename_column('REDSHIFT','Z')
meta.add_column(Column(Z_spec,name='TRUEZ'))
meta.add_column(Column(Z_input,name='Z_INPUT'))
meta.add_column(Column(DZ_FOG,name='DZ_FOG'))
meta.add_column(Column(DZ_sys_shift,name='DZ_SYS'))
if args.gamma_kms_zfit:
meta.add_column(Column(DZ_stat_shift,name='DZ_STAT'))
if 'Z_noRSD' in metadata.dtype.names:
meta.add_column(Column(metadata['Z_noRSD'],name='Z_NORSD'))
else:
log.info('Z_noRSD field not present in transmission file. Z_NORSD not saved to truth file')
#Save global seed and pixel seed to primary header
hdr=pyfits.Header()
hdr['GSEED']=global_seed
hdr['PIXSEED']=seed
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*nanomaggies.*")
hdu = pyfits.convenience.table_to_hdu(meta)
hdu.header['EXTNAME'] = 'TRUTH'
hduqso=pyfits.convenience.table_to_hdu(qsometa)
hduqso.header['EXTNAME'] = 'TRUTH_QSO'
hdulist=pyfits.HDUList([pyfits.PrimaryHDU(header=hdr),hdu,hduqso])
if args.dla :
hdulist.append(hdu_dla)
if args.balprob :
hdulist.append(hdu_bal)
if args.save_continuum :
hdulist.append(hdu_trueCont)
# Save one resolution matrix per camera to the truth file instead of one per quasar to the spectra files.
if not args.save_resolution:
for band in resolution.keys():
hdu = pyfits.ImageHDU(name="{}_RESOLUTION".format(band.upper()))
hdu.data = resolution[band].astype("f4")
hdulist.append(hdu)
hdulist.writeto(truth_filename, overwrite=True)
hdulist.close()
if args.zbest :
log.info("Read fibermap")
fibermap = read_fibermap(ofilename)
log.info("Writing a zbest file {}".format(zbest_filename))
columns = [
('CHI2', 'f8'),
('COEFF', 'f8' , (4,)),
('Z', 'f8'),
('ZERR', 'f8'),
('ZWARN', 'i8'),
('SPECTYPE', (str,96)),
('SUBTYPE', (str,16)),
('TARGETID', 'i8'),
('DELTACHI2', 'f8'),
('BRICKNAME', (str,8))]
zbest = Table(np.zeros(nqso, dtype=columns))
zbest['CHI2'][:] = 0.
zbest['Z'][:] = metadata['Z']
zbest['ZERR'][:] = 0.
zbest['ZWARN'][:] = 0
zbest['SPECTYPE'][:] = 'QSO'
zbest['SUBTYPE'][:] = ''
zbest['TARGETID'][:] = metadata['MOCKID']
zbest['DELTACHI2'][:] = 25.
hzbest = pyfits.convenience.table_to_hdu(zbest); hzbest.name='ZBEST'
hfmap = pyfits.convenience.table_to_hdu(fibermap); hfmap.name='FIBERMAP'
hdulist =pyfits.HDUList([pyfits.PrimaryHDU(),hzbest,hfmap])
hdulist.writeto(zbest_filename, overwrite=True)
hdulist.close() # see if this helps with memory issue
def _func(arg) :
""" Used for multiprocessing.Pool """
return simulate_one_healpix(**arg)
def main(args=None):
log = get_logger()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
if args.outfile is not None and len(args.infile)>1 :
log.error("Cannot specify single output file with multiple inputs, use --outdir option instead")
return 1
if not os.path.isdir(args.outdir) :
log.info("Creating dir {}".format(args.outdir))
os.makedirs(args.outdir)
if args.mags :
log.warning('--mags is deprecated; please use --bbflux instead')
args.bbflux = True
exptime = args.exptime
if exptime is None :
exptime = 1000. # sec
if args.eboss:
exptime = 1000. # sec (added here in case we change the default)
#- Generate obsconditions with args.program, then override as needed
obsconditions = reference_conditions[args.program.upper()]
if args.airmass is not None:
obsconditions['AIRMASS'] = args.airmass
if args.seeing is not None:
obsconditions['SEEING'] = args.seeing
if exptime is not None:
obsconditions['EXPTIME'] = exptime
if args.moonfrac is not None:
obsconditions['MOONFRAC'] = args.moonfrac
if args.moonalt is not None:
obsconditions['MOONALT'] = args.moonalt
if args.moonsep is not None:
obsconditions['MOONSEP'] = args.moonsep
if args.no_simqso:
log.info("Load QSO model")
model=QSO()
else:
log.info("Load SIMQSO model")
#lya_simqso_model.py is located in $DESISIM/py/desisim/scripts/.
#Uses a different emission-line model than the default SIMQSO.
#We will update this soon to match with the one used in select_mock_targets.
model=SIMQSO(nproc=1,sqmodel='lya_simqso_model')
decam_and_wise_filters = None
bassmzls_and_wise_filters = None
if args.target_selection or args.bbflux :
log.info("Load DeCAM and WISE filters for target selection sim.")
# ToDo @moustakas -- load north/south filters
decam_and_wise_filters = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z',
'wise2010-W1', 'wise2010-W2')
bassmzls_and_wise_filters = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z',
'wise2010-W1', 'wise2010-W2')
footprint_healpix_weight = None
footprint_healpix_nside = None
if args.desi_footprint :
if not 'DESIMODEL' in os.environ :
log.error("To apply DESI footprint, I need the DESIMODEL variable to find the file $DESIMODEL/data/footprint/desi-healpix-weights.fits")
sys.exit(1)
footprint_filename=os.path.join(os.environ['DESIMODEL'],'data','footprint','desi-healpix-weights.fits')
if not os.path.isfile(footprint_filename):
log.error("Cannot find $DESIMODEL/data/footprint/desi-healpix-weights.fits")
sys.exit(1)
pixmap=pyfits.open(footprint_filename)[0].data
footprint_healpix_nside=256 # same resolution as original map so we don't lose anything
footprint_healpix_weight = load_pixweight(footprint_healpix_nside, pixmap=pixmap)
if args.gamma_kms_zfit and not args.zbest:
log.info("Setting --zbest to true as required by --gamma_kms_zfit")
args.zbest = True
if args.extinction:
sfdmap= SFDMap()
else:
sfdmap=None
if args.balprob:
bal=BAL()
else:
bal=None
if args.eboss:
eboss = { 'footprint':FootprintEBOSS(), 'redshift':RedshiftDistributionEBOSS() }
else:
eboss = None
if args.nproc > 1:
func_args = [ {"ifilename":filename , \
"args":args, "model":model , \
"obsconditions":obsconditions , \
"decam_and_wise_filters": decam_and_wise_filters , \
"bassmzls_and_wise_filters": bassmzls_and_wise_filters , \
"footprint_healpix_weight": footprint_healpix_weight , \
"footprint_healpix_nside": footprint_healpix_nside , \
"bal":bal,"sfdmap":sfdmap,"eboss":eboss \
} for i,filename in enumerate(args.infile) ]
pool = multiprocessing.Pool(args.nproc)
pool.map(_func, func_args)
else:
for i,ifilename in enumerate(args.infile) :
simulate_one_healpix(ifilename,args,model,obsconditions,
decam_and_wise_filters,bassmzls_and_wise_filters,
footprint_healpix_weight,footprint_healpix_nside,
bal=bal,sfdmap=sfdmap,eboss=eboss)
|
desihub/desisim
|
py/desisim/scripts/quickquasars.py
|
Python
|
bsd-3-clause
| 41,683
|
[
"Gaussian"
] |
54ac955d84030bc93c8ec936d3f023d88e05553b3445b07d4a5e34db33dac2b3
|
# Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# Python script to produce plate-plate and sphere-sphere potentials
# for DNACCs with single-stranded DNA tethers.
#
# The tethers are modeled as freely jointed chains of 8 segments, each
# of length 5 nm. Bortolo Mognetti has calculated the integrated
# Boltzmann factors owing to configurational entropy, and stored these
# in the accompanying files interN.dat, intraRed.dat and 4P.dat
#
# These calculations were done to prepare a response to the following paper:
#
# W.B. Rogers and J.C. Crocker, Proc. Natl. Acad. Sci. USA 108, 15687 (2011),
# doi: 10.1073/pnas.1109853108
import numpy as np
from math import pi
import subprocess
import scipy.interpolate
import dnacc
from dnacc.units import nm
# Set up TetherStatistics object for pre-computed ssDNA Boltzmann factors
class ssDNAStatistics(object):
# Read in integrated Boltzmann factors
l_Kuhn = 5 * nm
raw = np.loadtxt('interN.dat')
interp_bridge = scipy.interpolate.interp1d(
raw[:, 0] * l_Kuhn, raw[:, 1] * l_Kuhn ** 2,
bounds_error=False, fill_value=0.0)
raw = np.loadtxt('intraRed.dat')
interp_loop = scipy.interpolate.interp1d(
raw[:, 0] * l_Kuhn, raw[:, 1] * l_Kuhn ** 2,
bounds_error=False, fill_value=raw[-1, 1] * l_Kuhn ** 2)
raw = np.loadtxt('4P.dat')
interp_exclude = scipy.interpolate.interp1d(
raw[:, 0] * 1 * nm, np.exp(-raw[:, 1]),
bounds_error=False, fill_value=1.0)
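    # interp_bridge and interp_loop give the configurational Boltzmann factors for
    # inter-plate (bridge) and intra-plate (loop) bonds as a function of separation;
    # interp_exclude (from 4P.dat) is the excluded-volume factor for an unbound tether.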
# These methods get called by dnacc code
@classmethod
def calc_boltz_binding_cnf_bridge(cls, system,
type_info_i, type_info_j):
return (float(cls.interp_bridge(system.separation)) *
float(cls.interp_exclude(system.separation)) ** 2)
@classmethod
def calc_boltz_binding_cnf_loop(cls, system,
type_info_i, type_info_j):
return (float(cls.interp_loop(system.separation)) *
float(cls.interp_exclude(system.separation)) ** 2)
@classmethod
def calc_boltz_exclusion(cls, system, type_info_i):
return float(cls.interp_exclude(system.separation))
@classmethod
def check_system(cls, system):
if system.separation <= 0:
raise ValueError("Invalid plate separation")
# Set up basic system
plates = dnacc.PlatesMeanField(ssDNAStatistics)
R = 550 * nm
area = 4 * pi * R ** 2
ALPHA = plates.add_tether_type(
plate='lower', sigma=4800.0 / area, sticky_end='alpha')
ALPHA_P = plates.add_tether_type(
plate='upper', sigma=4200.0 / area, sticky_end='alphap')
# Solution hybridisation free energy in kT given by -(c1 / (zr + TinC) - c2)
c1 = 24070.0
c2 = 70.2964
zr = 273.15
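# Illustrative check (not part of the original script): at T = 33.0 C,
# beta_DeltaG0 = -(24070.0 / (273.15 + 33.0) - 70.2964) ~ -8.33 kT, so
# hybridisation becomes stronger as the temperature is lowered.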
# The distances that we'll sample
hArr = np.arange(5 * nm, 81 * nm, 1 * nm)
# Look at various temperatures
for T in (30.5, 32.0, 33.0, 35.0, 36.0):
beta_DeltaG0 = -(c1 / (zr + T) - c2)
plates.beta_DeltaG0['alpha', 'alphap'] = beta_DeltaG0
# Calculate plate potential
betaFPlate = [plates.at(h).free_energy_density for h in hArr]
betaFRepPlate = [plates.at(h).rep_free_energy_density for h in hArr]
# Print out plate potential
with open('plates-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'F_rep (kT/nm^2)\t' 'F_att (kT/nm^2)\t'
'F_plate (kT/nm^2)\n')
for h, betaF, betaFRep in zip(hArr, betaFPlate, betaFRepPlate):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm,
betaFRep / (1 / nm ** 2),
betaFAtt / (1 / nm ** 2),
betaF / (1 / nm ** 2)))
# Now compute a similar plate potential using the Poisson approximation
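    # (Here the attraction is approximated by -sigma_bound, i.e. minus the bound
    #  density of the ALPHA/ALPHA_P pair per unit area, instead of the full
    #  self-consistent free-energy density used above.)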
badBetaFPlate = [plates.rep_free_energy_density -
plates.at(h).sigma_bound[ALPHA, ALPHA_P] for h in hArr]
with open('bad-plates-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'F_rep (kT/nm^2)\t' 'F_att (kT/nm^2)\t'
'F_plate (kT/nm^2)\n')
for h, betaF, betaFRep in zip(hArr, badBetaFPlate, betaFRepPlate):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm,
betaFRep / (1 / nm ** 2),
betaFAtt / (1 / nm ** 2),
betaF / (1 / nm ** 2)))
# Now for sphere-sphere potentials
betaFSpheres = dnacc.calc_spheres_potential(hArr, betaFPlate, R)
betaFRepSpheres = dnacc.calc_spheres_potential(hArr, betaFRepPlate, R)
with open('spheres-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'F_rep (kT)\t' 'F_att (kT)\t'
'F_spheres (kT)\n')
for h, betaF, betaFRep in zip(hArr, betaFSpheres, betaFRepSpheres):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm, betaFRep, betaFAtt, betaF))
# Same, but with the Poisson approximation
badBetaFSpheres = dnacc.calc_spheres_potential(hArr, badBetaFPlate, R)
with open('bad-spheres-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'F_rep (kT)\t' 'F_att (kT)\t'
'F_spheres (kT)\n')
for h, betaF, betaFRep in zip(hArr, badBetaFSpheres, betaFRepSpheres):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm, betaFRep, betaFAtt, betaF))
# Now convolve with a Gaussian
# ============================
# Set up Gaussian kernel and blurring function
Ncn = 201
blurSigma = 3 * nm
filtX = np.linspace(-3 * blurSigma, +3 * blurSigma, Ncn)
filtDx = filtX[1] - filtX[0]
filtX += 0.5 * filtDx
filtY = np.exp(-filtX ** 2 / (2 * blurSigma ** 2))
filtY /= np.sum(filtY)
resampHArr = np.arange(hArr[0], hArr[-1], filtDx)
def blur(origBetaF):
interpBetaF = scipy.interpolate.interp1d(hArr, origBetaF)
resampBetaF = interpBetaF(resampHArr)
resampExpMinusBetaF = np.exp(-resampBetaF)
        paddedInput = np.concatenate((np.zeros(Ncn // 2), resampExpMinusBetaF,
                                      np.ones(Ncn // 2)))
blurredExpMinusBetaF = np.convolve(paddedInput, filtY, mode='valid')
blurredBetaF = -np.log(blurredExpMinusBetaF)
return blurredBetaF
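    # Note on blur(): the Gaussian average is taken over exp(-beta*F), not over F
    # itself, and the result is converted back with -log. This is effectively a
    # Boltzmann-weighted average of the potential over separations distributed as
    # a Gaussian of width blurSigma around h.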
# Blur good sphere-sphere potentials
blurredBetaFSpheres = blur(betaFSpheres)
blurredBetaFRepSpheres = blur(betaFRepSpheres)
with open('blurred-spheres-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'blurred F_rep (kT)\t' 'blurred F_att (kT)\t'
'blurred F_spheres (kT)\n')
for h, betaF, betaFRep in zip(resampHArr, blurredBetaFSpheres,
blurredBetaFRepSpheres):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm, betaFRep, betaFAtt, betaF))
# Blur "bad" (i.e., Poisson approx) sphere-sphere potentials
blurredBadBetaFSpheres = blur(badBetaFSpheres)
with open('blurred-bad-spheres-A_B-T%.1f-G%.1f.txt' %
(T, beta_DeltaG0), 'w') as f:
f.write('# h (nm)\t' 'blurred F_rep (kT)\t' 'blurred F_att (kT)\t'
'blurred F_spheres (kT)\n')
for h, betaF, betaFRep in zip(resampHArr, blurredBadBetaFSpheres,
blurredBetaFRepSpheres):
betaFAtt = betaF - betaFRep
f.write('%g\t%g\t%g\t%g\n' %
(h / nm, betaFRep, betaFAtt, betaF))
|
patvarilly/DNACC
|
examples/ssDNA_tethers/ssDNA_tethers.py
|
Python
|
gpl-3.0
| 8,404
|
[
"Gaussian"
] |
d36fdef23abe538c18e5714232ca4f3ab419f1e9c3896256a59e2f978a9cc08c
|
import os
import numpy as np
import h5py
import time
from pyscf import gto, scf, mp, cc, ci, fci, ao2mo
from pyscf.cc import ccsd_rdm
from frankenstein.be.sd import SD
from frankenstein.tools.tensor_utils import get_symm_mat_pow
from frankenstein.be.sd import schmidt_decomposition, schmidt_decomposition_new
from frankenstein.tools.io_utils import prtvar
from frankenstein.tools.scf_utils import get_fock
from frankenstein.tools.pyscf_utils import tmpCCSD
def get_lao(pymf, C):
S = pymf.get_ovlp()
# if not given, use Löwdin
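    # (Symmetric/Löwdin orthogonalization: with C = S**(-1/2), C.T @ S @ C = I,
    #  so the returned columns form an orthonormal, AO-like basis.)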
if not C is None:
if not np.allclose(C.T@S@C, np.eye(C.shape[1])):
            raise RuntimeError("mo coeff matrix is not orthonormal.")
else:
C = get_symm_mat_pow(S, -0.5)
return C
def schmidt_decomposition_pyrhf(pymf, fragsites, Clao, S=None, frag_eye=True,
cutoff=1.E-5, addvirt=True, balance_fb=True, ret_Cs=False, skip_TE=False,
verbose=0):
if S is None: S = pymf.get_ovlp()
ncore = Clao.shape[0] - Clao.shape[1]
nact = Clao.shape[1]
# xform mo_coeff into LAO basis
C = Clao.T @ S @ pymf.mo_coeff[:,ncore:]
nocc = pymf.mol.nelectron // 2 - ncore
try:
rets = schmidt_decomposition_new(C, nocc, fragsites,
frag_eye=frag_eye, ret_Cs=ret_Cs, skip_Te=skip_TE, verbose=verbose)
except:
rets = schmidt_decomposition(C, nocc, fragsites,
frag_eye=frag_eye, cutoff=cutoff, addvirt=addvirt,
balance_fb=balance_fb, ret_Cs=ret_Cs, verbose=verbose)
if ret_Cs:
nf, T, TE, ssv, Cs = rets
else:
nf, T, TE, ssv = rets
# xform back to AO basis
T = Clao @ T
if not TE is None : TE = Clao @ TE
if ret_Cs : Cs = Clao @ Cs
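    # (Roughly: T spans the fragment+bath "Schmidt" space, TE the remaining
    #  environment orbitals, ssv holds the Schmidt singular values, and Cs is the
    #  set of occupied MOs in the same AO representation -- see how pySD below
    #  consumes these quantities.)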
rets = (nf, T, TE, ssv)
if ret_Cs:
rets += (Cs, )
return rets
def get_pymfs(msd, skip_kernel=False):
if not msd.hV_xformed:
msd.xform_hV()
pymols = gto.M()
pymols.nelectron = msd.nsocc * 2
pymols.verbose = 0
pymols.incore_anyway = True
pymols.max_memory = msd.max_mem
pymfs = scf.RHF(pymols)
pymfs.get_hcore = lambda *args: msd.hs
pymfs.get_ovlp = lambda *args: np.eye(msd.nsao)
pymfs._eri = msd.restore_eri(8)
if not skip_kernel:
if not msd.mo_coeffs is None:
dm0 = msd.mo_coeffs[:,:msd.nsocc] @ msd.mo_coeffs[:,:msd.nsocc].T
pymfs.kernel(dm0=dm0*2.)
else:
pymfs.kernel()
# check convergence
if not pymfs.converged:
err_grad = np.linalg.norm(pymfs.get_grad(pymfs.mo_coeff, pymfs.mo_occ))
print("Suspicious non-convergent SCF for fragment %s. Checking residual gradient..." % (msd.fraglabel), flush=True)
if err_grad < 1.E-4: # deemed converged
print("|grad| = %.3E < 1.E-4 --> Never mind..." % err_grad, flush=True)
pymfs.converged = True
else:
dm0 = msd.mo_coeffs[:,:msd.nsocc] @ msd.mo_coeffs[:,:msd.nsocc].T
pymfs.verbose = 4
pymfs.kernel(dm0=dm0*2.)
pymfs.verbose = 0
if not pymfs.converged:
err_grad = np.linalg.norm(pymfs.get_grad(pymfs.mo_coeff, pymfs.mo_occ))
print("Suspicious non-convergent SCF for fragment %s. Checking residual gradient..." % (msd.fraglabel), flush=True)
if err_grad < 1.E-4: # deemed converged
print("|grad| = %.3E < 1.E-4 --> Never mind..." % err_grad, flush=True)
pymfs.converged = True
else:
                        raise RuntimeError("SCF calculation of fragment %s did not converge." % (msd.fraglabel))
return pymfs
def make_rdm2_mf(pymf):
rdm1 = pymf.make_rdm1() * 0.5
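    # Closed-shell mean-field 2-RDM in this 0.5-scaled convention:
    # Gamma[i,j,k,l] = 2*D[i,j]*D[k,l] - D[i,l]*D[k,j], assembled below with einsum.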
return 2.*np.einsum("ij,kl", rdm1, rdm1) - np.einsum("il,kj", rdm1, rdm1)
from pyscf import lib
from pyscf.cc.ccsd import _ChemistsERIs
def _make_eris_incore(mycc, erif, mo_coeff, nf):
    """For a non-interacting bath, only the ffff block of the ERI is non-zero and
    needs to be xformed. The pyscf built-in ao2mo xform does not exploit this
    sparsity; this function provides such an implementation.
    """
cput0 = (time.clock(), time.time())
eris = _ChemistsERIs()
eris._common_init_(mycc, mo_coeff)
nocc = eris.nocc
nmo = eris.fock.shape[0]
nvir = nmo - nocc
eri1 = ao2mo.incore.full(erif, eris.mo_coeff[:nf].copy())
nvir_pair = nvir * (nvir+1) // 2
eris.oooo = np.empty((nocc,nocc,nocc,nocc))
eris.ovoo = np.empty((nocc,nvir,nocc,nocc))
eris.ovvo = np.empty((nocc,nvir,nvir,nocc))
eris.ovov = np.empty((nocc,nvir,nocc,nvir))
eris.ovvv = np.empty((nocc,nvir,nvir_pair))
eris.vvvv = np.empty((nvir_pair,nvir_pair))
ij = 0
outbuf = np.empty((nmo,nmo,nmo))
oovv = np.empty((nocc,nocc,nvir,nvir))
for i in range(nocc):
buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
for j in range(i+1):
eris.oooo[i,j] = eris.oooo[j,i] = buf[j,:nocc,:nocc]
oovv[i,j] = oovv[j,i] = buf[j,nocc:,nocc:]
ij += i + 1
eris.oovv = oovv
oovv = None
ij1 = 0
for i in range(nocc,nmo):
buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
eris.ovoo[:,i-nocc] = buf[:nocc,:nocc,:nocc]
eris.ovvo[:,i-nocc] = buf[:nocc,nocc:,:nocc]
eris.ovov[:,i-nocc] = buf[:nocc,:nocc,nocc:]
eris.ovvv[:,i-nocc] = lib.pack_tril(buf[:nocc,nocc:,nocc:])
dij = i - nocc + 1
lib.pack_tril(buf[nocc:i+1,nocc:,nocc:],
out=eris.vvvv[ij1:ij1+dij])
ij += i + 1
ij1 += dij
return eris
from frankenstein.optimizer import DIIS
class RHFlite:
"""Light implementation of RHF that can be used by DIIS, which needs
conv
unrestricted
nao
fock
Roothaan_step()
update()
get_diis_errvec()
    get_diis_fockvec()
"""
def __init__(self, pymf, mo_core, mo_act, Gcore0=None, **kwargs):
"""Constructor for RHFlite.
        RHFlite takes a pyscf.scf.hf.RHF instance (which defines the molecule), core
        MOs, and active MOs (both in the AO basis), and solves the RHF problem within
        the active space with the core frozen. The final MOs are a unitary rotation
        of the active MOs.
"""
if not isinstance(pymf, scf.hf.RHF):
raise ValueError("Arg1 of RHFlite must be an instance of pyscf.scf.hf.RHF.")
self.pymf = pymf
# some sanity checks
nao = self.pymf.mol.nao_nr()
nao_act = mo_act.shape[0]
if nao != 0:
nao_core = nao if mo_core is None else mo_core.shape[0]
check_nao1 = nao_act == nao
check_nao2 = nao_core == nao
if not (check_nao1 and check_nao2):
raise ValueError("nao derived from pymf ({:d}), mo_core ({:d}), and mo_act ({:d}) must be the same!".format(nao, nao_act, nao_core))
else:
nao = nao_act
S = pymf.get_ovlp()
ncore = 0 if mo_core is None else mo_core.shape[1]
nsao = mo_act.shape[1]
check_orth1 = np.allclose(mo_act.T@S@mo_act, np.eye(nsao))
check_orth2 = True if mo_core is None \
else np.allclose(mo_core.T@S@mo_core, np.eye(ncore))
if not (check_orth1 and check_orth2):
            raise ValueError("Input mo_core or mo_act does not represent orthonormal orbitals.")
# these properties can be set at initialization
self.conv = 7
self.max_iter = 100
self.max_diis = 20
self.verbose = self.pymf.mol.verbose
self.heff = np.zeros([nsao,nsao])
self.__dict__.update(kwargs)
if not isinstance(self.heff, np.ndarray):
            raise ValueError("Input heff must be a np array")
elif self.heff.shape != (nsao,nsao):
raise ValueError("Input heff has incorrect shape!")
# do not touch these properties UNLESS YOU KNOW WHAT YOU ARE DOING
self.is_converged = False
self.unrestricted = False
"""Some explanations for "self.nao0" and "self.nao" here:
"nao" is the real size of the underlying AO basis, and "nsao" is the # of active orbs. However, when interfacing with the DIIS optimizer, it requires "self.nao" to be the # of bas funcs it sees, which is "nsao" in our case.
Same nomenclature is used for "self.nocc0" and "self.nocc".
"""
self.mo_act = mo_act
self.nao0 = nao
self.nao = nsao
self.nmo = self.nao
self.nocc0 = self.pymf.mol.nelectron//2
self.ncore = ncore
self.nocc = self.nocc0 - self.ncore
self.h0 = self.pymf.get_hcore()
if not mo_core is None:
Pcore0 = mo_core @ mo_core.T
if Gcore0 is None:
Gcore0 = self.pymf.get_fock(dm=Pcore0*2.) - self.h0
self.Ecore = np.einsum("ij,ji->", 2*self.h0 + Gcore0, Pcore0)
self.Gcore = self.mo_act.T @ Gcore0 @ self.mo_act
else:
self.Ecore = 0.
self.Gcore = 0.
self.h = self.mo_act.T @ self.h0 @ self.mo_act + self.Gcore
self.rdm1 = None
self.fock = None
self.mo_coeff = None
self.mo_energy = None
self.e_scf = None
@property
def mo_coeff_occ(self):
return self.mo_coeff[:,:self.nocc]
def Roothaan_step(self):
self.mo_energy, self.mo_coeff = np.linalg.eigh(self.fock)
def update(self):
self.rdm1 = self.mo_coeff_occ @ self.mo_coeff_occ.T
rdm10 = self.mo_act @ self.rdm1 @ self.mo_act.T
fock0 = self.pymf.get_fock(dm=rdm10*2.)
self.fock = self.mo_act.T @ fock0 @ self.mo_act + self.Gcore + self.heff
self.e_scf = self.Ecore + np.einsum("ij,ji->", self.h + self.fock,
self.rdm1)
def get_diis_errvec(self):
X = self.rdm1 @ self.fock
X -= X.T
return X.ravel()
def get_diis_fockvec(self):
return self.fock.ravel()
def get_init_guess(self):
self.mo_coeff = np.diag([1. if i < self.nocc else 0.
for i in range(self.nao)])
def kernel(self):
self.get_init_guess()
self.update()
optimizer = DIIS(self, max_diis=self.max_diis)
if self.verbose > 1:
nspace = 44
print(">>> Entering RHFlite kernel...\n", flush=True)
print(" "+"-"*nspace, flush=True)
print(" {:4s} {:14s} {:9s} {:s}".format("iter".rjust(4),
"e_scf".rjust(14), "err".rjust(9), "comment"), flush=True)
print(" "+"-"*nspace, flush=True)
for iteration in range(1,self.max_iter+1):
if optimizer.next_step():
self.is_converged = True
if self.verbose > 1:
print(" {:4d} {:14.8f} {:.3E} {:s}".format(iteration,
self.e_scf, optimizer.err, optimizer.comment))
if self.is_converged:
break
if self.verbose > 1:
print(" "+"-"*nspace, flush=True)
msg = "DIIS converged!"
print(" "*(nspace-len(msg)+4) + msg + "\n", flush=True)
if self.verbose > 0:
prtvar("Final SCF energy", self.e_scf, "{: .10f}")
print(flush=True)
if self.verbose > 1:
print("<<< Leaving RHFlite kernel...\n", flush=True)
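# Minimal usage sketch for RHFlite (mirrors the call site in pySD.get_new_mo
# further below; the variable names here are illustrative only):
#
#   mflt = RHFlite(pymf, mo_core, mo_act, Gcore0=Gcore0, heff=heff_mo, verbose=0)
#   mflt.kernel()
#   mo_energy, mo_coeff_act = mflt.mo_energy, mflt.mo_coeff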
class pySD(SD):
"""Basic class that handles Schmidt decomposition of mean-field wave func
"""
def __init__(self, pymf, fragsites, Clao=None, build=True, **kwargs):
"""Constructor of SD
Inp:
pymf (RHF):
An pyscf RHF instance.
fragsites ([int] * nfs):
A list of nfs fragment sites.
"""
if not isinstance(pymf, scf.hf.RHF):
raise ValueError("pymf must be an RHF instance.")
if pymf.mol.spin != 0:
raise ValueError("pymf.mol.spin != 0 is detected. Currently we only support spin-compensated system.")
SD.__init__(self, pymf, fragsites)
self.pymf = pymf
self._h = None
self._S = None
self.Clao = get_lao(pymf, None) if Clao is None else Clao
self.Ccore = None
# these properties can be set via initialization
self.incore = True
self.fraglabel = ""
self.Vsfile = None # h5py file that stores the xformed ERIs
self.Vsname = None # h5py dataset name
self.swpfile = None
self.max_mem = 2000 # 2000 MB
self.__dict__.update(kwargs)
self._ncore = self.Clao.shape[0] - self.Clao.shape[1]
if not self.incore and self._h is None:
self._h = self.pymf.get_hcore()
if self.Vsfile is None: self.Vsfile = "__eris.h5"
if self.Vsname is None: self.Vsname = "".join(["eris", self.fraglabel])
if self.swpfile is None: self.swpfile = "__swp.h5"
# these properties are generated in "self.get_new_mo"
self.mo_coeffs_eff = None
# these properties are generated in "kernel"
self.t_sd = 0.
self.t_core = 0.
self.t_mos = 0.
self.t_xform = 0.
self.built = False
if build:
self.kernel()
if self.built and self.verbose > 2:
self.__str__()
    # @@HY: note that we do not store h and S (as we did before); the storage
    # required for the full h and S scales as O(N^3) and soon becomes a memory
    # bottleneck for large molecules.
@property
def h(self):
        # don't use self.pymf.get_hcore() here, since the "get_hcore" method may be overwritten later
return self.pymf.get_hcore() if self._h is None else self._h
@property
def S(self):
return self.pymf.get_ovlp() if self._S is None else self._S
@property
def ncore(self):
return self._ncore
@property
def nocc(self):
return self.pymf.mol.nelectron // 2 - self.ncore
@property
def nsocc(self):
return min(self.nf, self.nocc)
@property
def mo_occ(self):
return np.array([2. if i < self.nsocc else 0 for i in range(self.nsao)])
@property
def h_xformed(self):
return not self.hs is None
@property
def Vs_size_expt(self):
if self.nibath:
return (self.nf*(self.nf+1)//2)**2
elif self.nact < self.nsao:
return (self.nact*(self.nact+1)//2)**2
else:
return (self.nsao*(self.nsao+1)//2)**2
@property
def V_xformed(self):
Vsfile = self.Vsfile
Vsname = self.Vsname
if os.path.isfile(Vsfile):
with h5py.File(Vsfile, "r") as fVs:
if (Vsname in list(fVs) and
fVs[Vsname].size == self.Vs_size_expt):
return True
return False
@property
def hV_xformed(self):
return self.h_xformed and self.V_xformed
def kernel(self):
self.kernel_sd()
self.kernel_xform()
self.kernel_mos()
# set self.built to be True
self.built = True
def kernel_sd(self, S=None):
start = time.time()
if S is None: S = self.S
# Schmidt decomposition
self.nf, self.T, self.TE, self.ssv, self.mo_coeff = \
schmidt_decomposition_pyrhf(
self.pymf, self.fragsites, self.Clao,
S=S,
cutoff=self.cutoff,
frag_eye=self.frag_eye,
addvirt=self.addvirt,
balance_fb=self.balance_fb,
ret_Cs=True,
verbose=self.verbose)
self._nsao = self.T.shape[1]
self._nenv = 0 if self.TE is None else self.TE.shape[1]
end = time.time()
self.t_sd = end - start
if self.nact is None: self.nact = self.nsao
def kernel_xform(self):
start = time.time()
# form Gcore and Ebath
# NOTE: this choice of Pbath double counts the frag-frag block for non-interacting bath, which will be subtracted in "xform_hV" after xforming V.
if self.Ccore is None:
self.Gcores0 = np.zeros([self.nsao]*2)
self.Ecore0 = 0.
else:
Pcore = self.Ccore @ self.Ccore.T * 2.
Gcore0 = self.pymf.get_veff(dm=Pcore)
self.Gcores0 = self.T.T @ Gcore0 @ self.T
            self.Ecore0 = np.einsum("ji,ji->", 2*self.h + Gcore0, Pcore) * 0.5
Pbath = self.pymf.make_rdm1()*0.5 if self.nibath else self.PE
if Pbath is None:
self.Gcores = np.zeros([self.nsao]*2)
self.Ebath = 0.
if not self.incore:
self.Gcore = 0.
else:
Gcore = self.pymf.get_veff(dm=Pbath*2.)
self.Gcores = self.T.T @ Gcore @ self.T
# FIX ME: Ebath is wrong for nibath.
self.Ebath = 0. if self.nibath else np.einsum("ij,ji",
2*self.h+Gcore, Pbath)
if not self.incore:
self.Gcore = Gcore
end = time.time()
self.t_core = end - start
start = time.time()
# if incore, hs/Vs needs to be build anyway. Build them here.
if self.incore:
self.xform_hV()
end = time.time()
self.t_xform = end - start
def kernel_mos(self, h=None, S=None):
start = time.time()
if h is None: h = self.h
if S is None: S = self.S
# determine Schmidt space MOs
""" Some comments about this step:
In the Schmidt-space post-HF calculations, we need canonical HF MOs and MO energies. However, the MO coefficient matrix, self.mo_coeff, coming from Schmidt decomposition is NOT canonical. This step canonicalizes these MOs.
"""
if not self.incore:
Cs = self.mo_coeff
# diagonalize fock matrix in fb occ-vir orbs
Csocc = Cs[:,:self.nsocc]
Ps_full = Csocc @ Csocc.T
focks = Cs.T @ (self.pymf.get_fock(dm=Ps_full*2) + self.Gcore) @ Cs
# make sure these orbs correspond to a converged wave function
norm_fock_ov = np.linalg.norm(focks[:self.nsocc,self.nsocc:]) / \
(self.nsocc * self.nsvir)
if norm_fock_ov > 1E-6:
raise RuntimeError("""Fock matrix has a non-zero ov block (norm: {:.3E}). Make sure a converged RHF instance is used.""".format(norm_fock_ov))
# diagonalize fock matrix and obtain the canonical Schmidt MOs.
# Note: self.mo_coeff is the Schmidt MOs represented in the AO basis; while self.mo_coeffs is the same set of MOs represented in the Schmidt basis (hence "s", for Schmidt).
es, us = np.linalg.eigh(focks)
self.mo_energy = es
self.mo_coeff = Cs @ us
self.mo_coeffs = self.T.T @ S @ self.mo_coeff
self.Ps_rhf = (self.mo_coeffs*self.mo_occ) @ self.mo_coeffs.T
self.Es_rhf = 0.5 * np.einsum("ij,ji->", self.Ps_rhf,
self.T.T@h@self.T+self.Gcores) + \
np.sum(self.mo_energy[:self.nsocc])
else:
if hasattr(self, "mo_coeff"):
if not self.mo_coeff is None:
self.mo_coeffs = self.T.T @ S @ self.mo_coeff
pymfs = get_pymfs(self)
pymfs._eri = None
self.mo_coeffs = pymfs.mo_coeff
self.mo_energy = pymfs.mo_energy
self.Ps_rhf = (self.mo_coeffs*self.mo_occ) @ self.mo_coeffs.T
self.Es_rhf = pymfs.e_tot - pymfs.energy_nuc()
pymfs = None
end = time.time()
self.t_mos = end - start
def get_new_mo(self, heff):
        # xform heff (which is in the Schmidt basis) into the Schmidt MO basis.
heff_mo = self.mo_coeffs.T @ heff @ self.mo_coeffs
# solve the frozen core RHF problem
mflt = RHFlite(self.pymf, self.TE, self.mo_coeff, Gcore0=self.Gcore,
heff=heff_mo, verbose=0)
mflt.kernel()
mo_energy = mflt.mo_energy
mo_coeff = self.mo_coeff @ mflt.mo_coeff
self.mo_coeffs_eff = self.T.T @ self.S @ mo_coeff
return mo_energy, mo_coeff
def solve_impurity(self, solver, rdm_level=0, heff=None, sol_params=None):
"""Solve the impurity Hamiltonian with an effective potential
Inp:
solver (str):
Name of the solver, could be "mp2", "ccsd", "cisd", "fci".
rdm_level (int, optional, default: 0):
if rdm_level > 0 is used, the returned solver instance is ready to compute rdm1/2 via "make_rdm1/2".
heff (np.ndarray, optional, default: 0):
Effective 1e potential (in the Schmidt basis, i.e., self.T)!
sol_params (dict):
Solver parameters.
Output:
mc: an instance of the requested solver.
"""
if not self.built:
if self.verbose > 3:
                print("Schmidt decomposition has not been performed yet. Calling self.kernel() now...", flush=True)
self.kernel()
if heff is None:
heff = 0.
elif isinstance(heff, np.ndarray):
if not heff.shape == (self.nsao,)*2:
raise ValueError("heff has wrong shape", heff.shape)
else:
raise ValueError("heff must be a np array.")
has_heff = np.linalg.norm(heff) > 0.
if self.incore:
pymf = get_pymfs(self, skip_kernel=has_heff)
if has_heff:
# dress self.pymf's "get_hcore" method
hcore = self.hs + heff
def get_hcore(mol=None):
return hcore
get_hcore_old = pymf.get_hcore
pymf.get_hcore = get_hcore
Csocc = self.mo_coeffs[:,:self.nsocc]
dm0 = Csocc @ Csocc.T * 2.
pymf.kernel(dm0=dm0)
mo_energy = pymf.mo_energy
mo_coeff = pymf.mo_coeff
self.mo_coeffs_eff = pymf.mo_coeff.copy()
else:
pymf = self.pymf
if has_heff:
# get new mo coeff/energy
mo_energy, mo_coeff = self.get_new_mo(heff)
# dress self.pymf's "get_hcore" method
ST = pymf.get_ovlp() @ self.T
hcore = ST @ heff @ ST.T + self.h
def get_hcore(mol=None):
return hcore
get_hcore_old = pymf.get_hcore
pymf.get_hcore = get_hcore
else:
mo_energy = self.mo_energy
mo_coeff = self.mo_coeff
# solve the impurity problem
if solver.upper() == "MP2":
mc = self.solve_mp2(pymf, mo_energy, mo_coeff)
elif solver.upper() == "CCSD":
mc = self.solve_ccsd(rdm_level, pymf, mo_energy, mo_coeff,
sol_params)
elif solver.upper() == "CISD":
mc = self.solve_cisd(pymf, mo_energy, mo_coeff)
elif solver.upper() == "FCI":
mc = self.solve_fci(pymf, mo_energy, mo_coeff)
elif solver.upper() == "RHF":
mc = pymf
mc.kernel()
else:
raise ValueError("Unsupported solver {:s}".format(solver))
# set self.pymf's "get_hcore" method back
if has_heff:
pymf.get_hcore = get_hcore_old
pymf = None
return mc
# wrappers for PySCF solvers
def solve_mp2(self, pymf=None, mo_energy=None, mo_coeff=None):
if pymf is None: pymf = self.pymf
if mo_energy is None: mo_energy = self.mo_energy
if mo_coeff is None: mo_coeff = self.mo_coeff
pymp = mp.MP2(pymf, mo_coeff=mo_coeff, mo_occ=self.mo_occ)
pymp.mo_energy = mo_energy
pymp.kernel()
return pymp
def solve_ccsd(self, rdm_level, pymf=None, mo_energy=None, mo_coeff=None,
sol_params=None):
if pymf is None: pymf = self.pymf
if mo_energy is None: mo_energy = self.mo_energy
if mo_coeff is None: mo_coeff = self.mo_coeff
rdm = sol_params["rdm"]
pycc = tmpCCSD(pymf, rdm, mo_coeff=mo_coeff, mo_occ=self.mo_occ)
pycc.incore_complete = True
pymf = None
if self.nibath:
erif = self.restore_eri(8, pad0=False)
eris = _make_eris_incore(pycc, erif, mo_coeff, self.nf)
else:
eris = pycc.ao2mo()
eris.mo_energy = mo_energy
eris.fock = np.diag(mo_energy)
try:
pycc.kernel(eris=eris)
except:
raise RuntimeError("CCSD calculation of fragment %s does not converge" % (self.fraglabel))
if rdm == "relaxed" and rdm_level > 0:
pycc.solve_lambda(eris=eris)
return pycc
def solve_cisd(self, pymf=None, mo_energy=None, mo_coeff=None):
if pymf is None: pymf = self.pymf
if mo_energy is None: mo_energy = self.mo_energy
        if mo_coeff is None: mo_coeff = self.mo_coeff
pyci = ci.CISD(pymf, mo_coeff=mo_coeff, mo_occ=self.mo_occ)
if self.nibath:
erif = self.restore_eri(8, pad0=False)
eris = _make_eris_incore(pyci, erif, mo_coeff, self.nf)
else:
eris = pyci.ao2mo()
eris.mo_energy = self.mo_energy
eris.fock = np.diag(self.mo_energy)
pyci.kernel(eris=eris)
return pyci
def solve_fci(self, pymf=None, mo_energy=None, mo_coeff=None):
if pymf is None: pymf = self.pymf
if mo_energy is None: mo_energy = self.mo_energy
if mo_coeff is None: mo_coeff = self.mo_coeff
pyfci = fci.FCI(pymf, mo=mo_coeff)
if self.incore:
h1e = mo_coeff.T @ pymf.get_hcore() @ mo_coeff
else:
h1e = mo_coeff.T @ (pymf.get_hcore() + self.Gcore) @ mo_coeff
nelec = [self.nsocc]*2
e_fci, civec = pyfci.kernel(h1e=h1e, nelec=nelec, ecore=0.)
e_corr = e_fci - self.Es_rhf
class tmpFCI:
def __init__(self, pyfci, e_corr, civec):
self.pyfci = pyfci
self.e_corr = e_corr
self.civec = civec
self.rdm1 = None
self.rdm2 = None
@property
def e_tot(self):
return self.pyfci.e_tot
def make_rdm1(self):
p = self.pyfci
return p.make_rdm1(self.civec, p.norb, p.nelec)
def make_rdm2(self):
p = self.pyfci
return p.make_rdm2(self.civec, p.norb, p.nelec)
pyfci_ = tmpFCI(pyfci, e_corr, civec)
return pyfci_
def make_rdm1(self, mc):
"""Generate rdm1 in Schmidt basis (i.e., columns of self.T)
Inp:
mc (solver instance):
Generated by self.solve_impurity.
[NOTE] For solver == "CCSD", rdm_level > 0 must be used when calling self.solve_impurity.
Output:
rdm1 (np.ndarray, nsao*nsao):
rdm1 in Schmidt basis.
"""
mo_coeffs = self.mo_coeffs if self.mo_coeffs_eff is None \
else self.mo_coeffs_eff
# scaled by 0.5 to match our convention
if isinstance(mc,scf.hf.RHF):
rdm1s = mc.make_rdm1() * 0.5
else:
rdm1s = mo_coeffs @ mc.make_rdm1() @ mo_coeffs.T * 0.5
return rdm1s
def make_rdm2(self, mc):
"""Same as `make_rdm1`, but for rdm2.
"""
mo_coeffs = self.mo_coeffs if self.mo_coeffs_eff is None \
else self.mo_coeffs_eff
if isinstance(mc,scf.hf.RHF):
rdm2s = make_rdm2_mf(mc)
else:
rdm2s = np.einsum("ijkl,pi,qj,rk,sl->pqrs", 0.5*mc.make_rdm2(),
*([mo_coeffs]*4), optimize=True)
return rdm2s
def delete_eri(self):
if os.path.isfile(self.Vsfile):
with h5py.File(self.Vsfile, "w") as Vs:
if self.Vsname in Vs:
if self.verbose > 0:
print("""Deleting data "{:s}" from file "{:s}".""".format(self.Vsname, self.Vsfile), flush=True)
del Vs[self.Vsname]
def delete_erifile(self):
if os.path.isfile(self.Vsfile):
if self.verbose > 0:
print("""Removing "{:s}".""".format(self.Vsfile))
os.remove(self.Vsfile)
def xform_hV(self, compact=True):
Vsfile = self.Vsfile
Vsname = self.Vsname
if not self.V_xformed:
Csd = self.T[:,:self.nf] if self.nibath else self.T
ao2mo.outcore.full(self.pymf.mol, Csd, Vsfile, dataname=Vsname,
compact=compact)
if self.nibath:
self.remove_dc_Gcore()
if not self.h_xformed:
self.hs = self.T.T @ self.h @ self.T + self.Gcores + self.Gcores0
def remove_dc_Gcore(self):
nf = self.nf
Vsfile = self.Vsfile
Vsname = self.Vsname
with h5py.File(Vsfile, "r") as fVs:
Csf = self.T[:,:self.nf].T @ self.S @ self.mo_coeff_occ
Psf = Csf @ Csf.T
Vsf = ao2mo.restore(1, fVs[Vsname], nf)
Gfs = -get_fock(np.zeros([nf,nf]), Vsf, Psf)
self.Gcores[:nf,:nf] += Gfs
def restore_eri(self, symm, Vsfile=None, Vsname=None, pad0=True):
if Vsfile is None: Vsfile = self.Vsfile
if Vsname is None: Vsname = self.Vsname
with h5py.File(Vsfile, "r") as fVs:
if self.nibath or self.nsao > self.nact:
nf = self.nf if self.nibath else self.nact
if pad0:
Vs = np.zeros([self.nsao]*4)
Vs[:nf,:nf,:nf,:nf] = ao2mo.restore(1, fVs[Vsname], nf)
if symm != 1:
Vs = ao2mo.restore(symm, Vs, self.nsao)
else:
Vs = ao2mo.restore(symm, fVs[Vsname], nf)
else:
Vs = ao2mo.restore(symm, fVs[Vsname], self.nsao)
return Vs
def get_frankmol(self, heff=None, **kwargs):
if not self.hV_xformed:
self.xform_hV()
from frankenstein import molecule
        if heff is None: heff = 0.
        mol = molecule.MOL.init_from_hV(self.hs+heff, self.restore_eri(1),
self.nsocc, verbose=self.verbose)
mol.__dict__.update(kwargs)
return mol
def get_SD_energy(self, rdm1s, rdm2s, ret_rhf=False):
if not self.hV_xformed:
self.xform_hV(compact=True)
nf = self.nf
hf1 = self.hs[:nf]-self.Gcores[:nf]
e1 = 2.*np.einsum("ij,ij->i", hf1, rdm1s[:nf])
e2 = np.einsum("ij,ij->i", self.Gcores[:nf], rdm1s[:nf])
if ret_rhf:
rdm1s_rhf = self.Ps_rhf * 0.5
e1_rhf = 2.*np.einsum("ij,ij->i", hf1, rdm1s_rhf[:nf])
e2_rhf = np.einsum("ij,ij->i", self.Gcores[:nf], rdm1s_rhf[:nf])
# the first nf are frag sites
with h5py.File(self.Vsfile, "r") as Vs:
if self.nibath:
jmax = self.nf
elif self.nact < self.nsao:
jmax = self.nact
else:
jmax = self.nsao
# jmax = self.nf if self.nibath else self.nsao
for i in range(self.nf):
for j in range(jmax):
# By default, Vs is stored in the "compact" format. So we need to do some extra work here, but not too bad.
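                    # (The packed tril row is expanded into a full symmetric matrix:
                    #  the diagonal is halved before adding the transpose so that
                    #  diagonal elements are not counted twice.)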
ij = i*(i+1)//2+j if i > j else j*(j+1)//2+i
Vsij = np.zeros_like(self.hs)
tril_idx = np.tril_indices(jmax)
Vsij[tril_idx] = Vs[self.Vsname][ij]
Vsij[np.diag_indices(self.nsao)] *= 0.5
Vsij += Vsij.T
e2[i] += np.einsum("kl,kl->", Vsij, rdm2s[i,j])
if ret_rhf:
rdm2s_rhf_ij = 2.*rdm1s_rhf[i,j]*rdm1s_rhf - \
np.outer(rdm1s_rhf[i], rdm1s_rhf[j])
e2_rhf[i] += np.einsum("kl,kl->", Vsij, rdm2s_rhf_ij)
rets = (e1, e2, e1+e2)
if ret_rhf:
rets += (e1_rhf, e2_rhf, e1_rhf+e2_rhf)
return rets
if __name__ == "__main__":
from frankenstein.tools.pyscf_utils import get_pymol
from pyscf import scf
geom = "../tests/geom/h10.zmat"
basis = "sto-3g"
fragsites = [0,1,2,3]
pymol = get_pymol(geom, basis, verbose=0)
pymf = scf.RHF(pymol)
pymf.kernel()
pysd = pySD(pymf, fragsites)
mc = pysd.solve_impurity("MP2")
ecorr_mp2_ = mc.e_corr
rdm1_mp2_ = mc.make_rdm1()
rdm2_mp2 = mc.make_rdm2()
rdm2s_mp2 = pysd.make_rdm2(mc)
mc = pysd.solve_impurity("CCSD", 1)
ecorr_ccsd_ = mc.e_corr
rdm1_ccsd_ = mc.make_rdm1()
rdm2_ccsd = mc.make_rdm2()
rdm2s_ccsd = pysd.make_rdm2(mc)
mc = pysd.solve_impurity("CISD")
ecorr_cisd_ = mc.e_corr
rdm1_cisd_ = mc.make_rdm1()
rdm2_cisd = mc.make_rdm2()
rdm2s_cisd = pysd.make_rdm2(mc)
mc = pysd.solve_impurity("FCI")
ecorr_fci_ = mc.e_corr
rdm1_fci_ = mc.make_rdm1()
rdm2_fci = mc.make_rdm2()
rdm2s_fci = pysd.make_rdm2(mc)
from pyscf import mp
pymp = mp.MP2(pymf, mo_coeff=pysd.mo_coeff, mo_occ=pysd.mo_occ)
pymp.mo_energy = pysd.mo_energy
pymp.kernel()
ecorr_mp2 = pymp.e_corr
rdm1_mp2 = pymp.make_rdm1()
from pyscf import cc
pycc = cc.CCSD(pymf, mo_coeff=pysd.mo_coeff, mo_occ=pysd.mo_occ)
eris = pycc.ao2mo()
eris.mo_energy = pysd.mo_energy
eris.fock = np.diag(pysd.mo_energy)
pycc.kernel(eris=eris)
pycc.solve_lambda(eris=eris)
ecorr_ccsd = pycc.e_corr
rdm1_ccsd = pycc.make_rdm1()
from pyscf import ci
pyci = ci.CISD(pymf, mo_coeff=pysd.mo_coeff, mo_occ=pysd.mo_occ)
pyci.kernel(eris=eris)
ecorr_cisd = pyci.e_corr
rdm1_cisd = pyci.make_rdm1()
from pyscf import fci
pyfci = fci.FCI(pymf, mo=pysd.mo_coeff)
h1e = pysd.mo_coeff.T @ (pymf.get_hcore() + pysd.Gcore) @ pysd.mo_coeff
rets = pyfci.kernel(h1e=h1e, nelec=[pysd.nsocc,pysd.nsocc], ecore=0.)
ecorr_fci = pyfci.e_tot - pysd.Es_rhf
rdm1_fci = pyfci.make_rdm1(rets[1], pyfci.norb, pyfci.nelec)
assert(np.allclose(ecorr_mp2, ecorr_mp2_))
assert(np.allclose(ecorr_ccsd, ecorr_ccsd_))
assert(np.allclose(ecorr_cisd, ecorr_cisd_))
assert(np.allclose(ecorr_fci, ecorr_fci_))
assert(np.allclose(rdm1_mp2, rdm1_mp2_))
assert(np.allclose(rdm1_ccsd, rdm1_ccsd_))
assert(np.allclose(rdm1_cisd, rdm1_cisd_))
assert(np.allclose(rdm1_fci, rdm1_fci_))
heff = np.zeros([pysd.nsao,pysd.nsao])
heff[0,0] = 0.1
heff[0,1] = heff[1,0] = 0.05
S = pymf.get_ovlp()
X = get_symm_mat_pow(S, -0.5)
Xinv = get_symm_mat_pow(S, 0.5)
heff_full = np.zeros_like(S)
heff_full[:pysd.nfs,:pysd.nfs] = heff[:pysd.nfs,:pysd.nfs]
from frankenstein.tools.io_utils import dumpMat
T_soao = Xinv @ pysd.T
dumpMat(T_soao)
dumpMat(T_soao @ heff @ T_soao.T)
dumpMat(X @ heff_full @ X.T)
mc = pysd.solve_impurity("fci", 1, heff=heff)
rdm1s_fci = pysd.make_rdm1(mc)
rdm2s_fci = pysd.make_rdm2(mc)
from frankenstein.tools.io_utils import dumpMat
# dumpMat(rdm1_fci)
dumpMat(rdm1s_fci)
dumpMat(rdm2s_fci[0,0])
pysd.xform_hV()
e1, e2, e12 = pysd.get_SD_energy(rdm1s_fci, rdm2s_fci)
print(e1)
print(e2)
print(e12)
print(np.mean(e12))
|
hongzhouye/frankenstein
|
pyscf_be/pysd.py
|
Python
|
bsd-3-clause
| 35,392
|
[
"PyMOL",
"PySCF"
] |
6390286230b730ac046c2139262bb49b7f75466110342b998e30c9f40fb8bdd3
|
from __future__ import with_statement
import unittest
from repoze.bfg import testing
from karl import testing as karltesting
from zope.testing.cleanup import cleanUp
class TestAdminView(unittest.TestCase):
def setUp(self):
cleanUp()
def tearDown(self):
cleanUp()
def test_it(self):
from karl.views.admin import admin_view
site = DummyModel()
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/admin.pt'
)
response = admin_view(site, request)
self.assertEqual(response.status_int, 200)
class TestDeleteContentView(unittest.TestCase):
def setUp(self):
cleanUp()
from datetime import datetime
site = DummyModel()
self.site = site
profiles = site['profiles'] = DummyModel()
site['bigendians'] = c = DummyModel()
c.title = 'Big Endians'
c.modified = datetime(2009, 12, 23, 12, 31)
c.creator = 'chucknorris'
site['littleendians'] = c = DummyModel()
c.title = 'Little Endians'
c.modified = datetime(2009, 12, 26, 3, 31)
c.creator = 'geekbill'
p = profiles['chucknorris'] = karltesting.DummyProfile()
p.title = 'Chuck Norris'
p = profiles['geekbill'] = karltesting.DummyProfile()
p.title = 'Bill Haverchuck'
from karl.models.interfaces import ICatalogSearch
from zope.interface import Interface
search = DummyCatalogSearch()
def dummy_search_factory(context):
return search
karltesting.registerAdapter(dummy_search_factory, Interface,
ICatalogSearch)
self.search = search
from karl.views.admin import delete_content_view
self.fut = delete_content_view
def tearDown(self):
cleanUp()
def test_render_form(self):
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/delete_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
response = self.fut(self.site, request)
self.assertEqual(renderer.filtered_content, [])
c = renderer.communities
self.assertEqual(len(c), 2)
self.assertEqual(c[0]['path'], "/bigendians")
self.assertEqual(c[0]['title'], "Big Endians")
self.assertEqual(c[1]['path'], "/littleendians")
self.assertEqual(c[1]['title'], "Little Endians")
def test_filter_content_no_filter(self):
renderer = testing.registerDummyRenderer(
'templates/admin/delete_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
request = testing.DummyRequest(
params=dict(
filter_content=1,
)
)
response = self.fut(self.site, request)
c = renderer.filtered_content
self.assertEqual(len(c), 0)
def test_filter_content_by_title(self):
renderer = testing.registerDummyRenderer(
'templates/admin/delete_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
request = testing.DummyRequest(
params=dict(
filter_content=1,
title_contains="Little",
)
)
response = self.fut(self.site, request)
c = renderer.filtered_content
self.assertEqual(len(c), 1)
self.assertEqual(c[0]['path'], "/littleendians")
self.assertEqual(c[0]['url'], "http://example.com/littleendians/")
self.assertEqual(c[0]['title'], "Little Endians")
self.assertEqual(c[0]['modified'], "12/26/2009 03:31")
self.assertEqual(c[0]['creator_name'], "Bill Haverchuck")
self.assertEqual(c[0]['creator_url'],
"http://example.com/profiles/geekbill/")
self.failUnless(self.site['littleendians'].deactivated)
def test_filter_content_by_community(self):
renderer = testing.registerDummyRenderer(
'templates/admin/delete_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
request = testing.DummyRequest(
params=dict(
filter_content=1,
community='/bigendians',
)
)
response = self.fut(self.site, request)
self.assertEqual(len(self.search.calls), 2)
self.assertEqual(self.search.calls[0]['path'], '/bigendians')
def test_delete_one_item(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('delete_content', '1'),
('selected_content', '/bigendians'),
]),
view_name='delete_content.html',
)
self.failUnless('bigendians' in self.site)
response = self.fut(self.site, request)
self.assertEqual(
response.location,
"http://example.com/delete_content.html"
"?status_message=Deleted+one+content+item."
)
self.failIf('bigendians' in self.site)
self.failUnless('littleendians' in self.site)
def test_delete_two_items(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('delete_content', '1'),
('selected_content', '/bigendians'),
('selected_content', '/littleendians'),
]),
view_name='delete_content.html',
)
self.failUnless('bigendians' in self.site)
self.failUnless('littleendians' in self.site)
response = self.fut(self.site, request)
self.assertEqual(
response.location,
"http://example.com/delete_content.html"
"?status_message=Deleted+2+content+items."
)
self.failIf('bigendians' in self.site)
self.failIf('littleendians' in self.site)
class TestMoveContentView(unittest.TestCase):
def setUp(self):
cleanUp()
from karl.models.interfaces import ICommunity
from zope.interface import directlyProvides
from datetime import datetime
site = DummyModel()
self.site = site
profiles = site['profiles'] = DummyModel()
site['bigendians'] = c = DummyModel()
directlyProvides(c, ICommunity)
c.title = 'Big Endians'
c.modified = datetime(2009, 12, 23, 12, 31)
c.creator = 'chucknorris'
blog = c['blog'] = DummyModel()
entry = blog['entry1'] = DummyModel()
entry.title = 'Blog Post 1'
entry.modified = datetime(2009, 12, 23, 12, 31)
entry.creator = 'chucknorris'
entry = blog['entry2'] = DummyModel()
entry.title = 'Blog Post 2'
entry.modified = datetime(2009, 12, 23, 13, 31)
entry.creator = 'chucknorris'
site['littleendians'] = c = DummyModel()
directlyProvides(c, ICommunity)
c.title = 'Little Endians'
c.modified = datetime(2009, 12, 26, 3, 31)
c.creator = 'geekbill'
c['blog'] = DummyModel()
p = profiles['chucknorris'] = karltesting.DummyProfile()
p.title = 'Chuck Norris'
p = profiles['geekbill'] = karltesting.DummyProfile()
p.title = 'Bill Haverchuck'
from karl.models.interfaces import ICatalogSearch
from zope.interface import Interface
search = DummyCatalogSearch()
def dummy_search_factory(context):
return search
karltesting.registerAdapter(dummy_search_factory, Interface,
ICatalogSearch)
self.search = search
from karl.views.admin import move_content_view
self.fut = move_content_view
def tearDown(self):
cleanUp()
def test_render_form(self):
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/move_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
response = self.fut(self.site, request)
self.assertEqual(renderer.filtered_content, [])
c = renderer.communities
self.assertEqual(len(c), 2)
self.assertEqual(c[0]['path'], "/bigendians")
self.assertEqual(c[0]['title'], "Big Endians")
self.assertEqual(c[1]['path'], "/littleendians")
self.assertEqual(c[1]['title'], "Little Endians")
def test_filter_content_no_filter(self):
renderer = testing.registerDummyRenderer(
'templates/admin/move_content.pt'
)
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
self.search.add_result([
self.site['bigendians'],
self.site['littleendians'],
])
request = testing.DummyRequest(
params=dict(
filter_content=1,
)
)
response = self.fut(self.site, request)
c = renderer.filtered_content
self.assertEqual(len(c), 0)
def test_move_one_item(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('move_content', '1'),
('selected_content', '/bigendians/blog/entry1'),
('to_community', '/littleendians'),
]),
view_name='move_content.html',
)
src_blog = self.site['bigendians']['blog']
dst_blog = self.site['littleendians']['blog']
self.failUnless('entry1' in src_blog)
self.failIf('entry1' in dst_blog)
response = self.fut(self.site, request)
self.assertEqual(
response.location,
"http://example.com/move_content.html"
"?status_message=Moved+one+content+item."
)
self.failIf('entry1' in src_blog)
self.failUnless('entry1' in dst_blog)
self.failUnless('entry2' in src_blog)
self.failIf('entry2' in dst_blog)
def test_error_no_to_community(self):
renderer = testing.registerDummyRenderer(
'templates/admin/move_content.pt'
)
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('move_content', '1'),
('selected_content', '/bigendians/blog/entry1'),
]),
view_name='move_content.html',
)
response = self.fut(self.site, request)
self.assertEqual(response.status_int, 200)
self.assertEqual(renderer.api.error_message,
'Please specify destination community.')
def test_move_two_items(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('move_content', '1'),
('selected_content', '/bigendians/blog/entry1'),
('selected_content', '/bigendians/blog/entry2'),
('to_community', '/littleendians'),
]),
view_name='move_content.html',
)
src_blog = self.site['bigendians']['blog']
dst_blog = self.site['littleendians']['blog']
self.failUnless('entry1' in src_blog)
self.failIf('entry1' in dst_blog)
self.failUnless('entry2' in src_blog)
self.failIf('entry2' in dst_blog)
response = self.fut(self.site, request)
self.assertEqual(
response.location,
"http://example.com/move_content.html"
"?status_message=Moved+2+content+items."
)
self.failIf('entry1' in src_blog)
self.failUnless('entry1' in dst_blog)
self.failIf('entry2' in src_blog)
self.failUnless('entry2' in dst_blog)
def test_move_bad_destination(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(
params=MultiDict([
('move_content', '1'),
('selected_content', '/bigendians/blog/entry1'),
('to_community', '/littleendians'),
]),
view_name='move_content.html',
)
renderer = testing.registerDummyRenderer(
'templates/admin/move_content.pt'
)
del self.site['littleendians']['blog']
self.fut(self.site, request)
self.assertEqual(
renderer.api.error_message,
'Path does not exist in destination community: /littleendians/blog'
)
class TestSiteAnnouncementView(unittest.TestCase):
def setUp(self):
cleanUp()
site = self.site = DummyModel()
def tearDown(self):
cleanUp()
def test_render(self):
from karl.views.admin import site_announcement_view
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/site_announcement.pt'
)
response = site_announcement_view(self.site, request)
self.assertEqual(response.status_int, 200)
def test_set_announcement(self):
from karl.views.admin import site_announcement_view
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/site_announcement.pt'
)
request.params['submit-site-announcement'] = None
annc = '<p>This is the <i>announcement</i>.</p>'
request.params['site-announcement-input'] = annc
response = site_announcement_view(self.site, request)
self.assertEqual(self.site.site_announcement, annc[3:-4])
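        # annc[3:-4] strips the enclosing <p>...</p>, i.e. only the inner markup
        # of the paragraph is expected to be stored on the site.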
def test_set_announcement_drop_extra(self):
from karl.views.admin import site_announcement_view
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/site_announcement.pt'
)
request.params['submit-site-announcement'] = None
annc = '<p>This is the <i>announcement</i>.</p><p>This is dropped.</p>'
request.params['site-announcement-input'] = annc
response = site_announcement_view(self.site, request)
self.assertEqual(self.site.site_announcement, annc[3:35])
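        # annc[3:35] is the inner markup of the first <p> only; the second
        # paragraph is expected to be dropped by the view.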
def test_remove_announcement(self):
from karl.views.admin import site_announcement_view
self.site.site_announcement = 'Foo.'
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer(
'templates/admin/site_announcement.pt'
)
request.params['remove-site-announcement'] = None
response = site_announcement_view(self.site, request)
self.failIf(self.site.site_announcement)
class TestEmailUsersView(unittest.TestCase):
def setUp(self):
cleanUp()
from karl.testing import DummyMailer
from repoze.sendmail.interfaces import IMailDelivery
self.mailer = DummyMailer()
testing.registerUtility(self.mailer, IMailDelivery)
site = self.site = testing.DummyModel()
profiles = site['profiles'] = testing.DummyModel()
users = site.users = karltesting.DummyUsers()
fred = profiles['fred'] = testing.DummyModel(
title='Fred Flintstone', email='fred@example.com'
)
barney = profiles['barney'] = testing.DummyModel(
title='Barney Rubble', email='barney@example.com'
)
users._by_id = users._by_login = {
'fred': {
'groups': ['group.KarlStaff'],
},
'barney': {
'groups': ['group.KarlAdmin'],
}
}
from repoze.bfg.interfaces import ISettings
testing.registerUtility(karltesting.DummySettings(), ISettings)
from karl.models.interfaces import ICatalogSearch
from zope.interface import Interface
search = DummyCatalogSearch()
def dummy_search_factory(context):
return search
karltesting.registerAdapter(dummy_search_factory, Interface,
ICatalogSearch)
search.add_result([fred, barney])
def tearDown(self):
cleanUp()
def _make_one(self, context, request):
from karl.views.admin import EmailUsersView
return EmailUsersView(context, request)
def test_render_form(self):
request = testing.DummyRequest()
testing.registerDummySecurityPolicy('barney')
view = self._make_one(self.site, request)
renderer = testing.registerDummyRenderer(
'templates/admin/email_users.pt'
)
response = view()
self.assertEqual(response.status_int, 200)
self.assertEqual(renderer.to_groups, view.to_groups)
self.assertEqual(renderer.from_emails, [
('self', 'Barney Rubble <barney@example.com>'),
('admin', 'karl3test Administrator <admin@example.com>'),
])
def test_email_everyone(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(params=MultiDict({
'from_email': 'self',
'to_group': '',
'subject': 'Exciting news!',
'text': 'Foo walked into a bar...',
'send_email': '1',
}))
testing.registerDummySecurityPolicy('barney')
view = self._make_one(self.site, request)
response = view()
self.assertEqual(response.location,
'http://example.com/admin.html'
'?status_message=Sent+message+to+2+users.')
self.assertEqual(len(self.mailer), 2)
self.assertEqual(self.mailer[0].mfrom, 'barney@example.com')
msg = self.mailer[0].msg
self.assertEqual(msg['Subject'], 'Exciting news!')
body = msg.get_payload(decode=True)
self.failUnless('Foo walked into a bar' in body, body)
def test_email_staff(self):
from webob.multidict import MultiDict
request = testing.DummyRequest(params=MultiDict({
'from_email': 'admin',
'to_group': 'group.KarlStaff',
'subject': 'Exciting news!',
'text': 'Foo walked into a bar...',
'send_email': '1',
}))
testing.registerDummySecurityPolicy('barney')
view = self._make_one(self.site, request)
response = view()
self.assertEqual(response.location,
'http://example.com/admin.html'
'?status_message=Sent+message+to+1+users.')
self.assertEqual(len(self.mailer), 1)
self.assertEqual(self.mailer[0].mfrom, 'admin@example.com')
msg = self.mailer[0].msg
self.assertEqual(msg['Subject'], 'Exciting news!')
self.assertEqual(msg['From'],
'karl3test Administrator <admin@example.com>')
self.assertEqual(msg['To'], 'Fred Flintstone <fred@example.com>')
body = msg.get_payload(decode=True)
self.failUnless('Foo walked into a bar' in body, body)
class TestSyslogView(unittest.TestCase):
def setUp(self):
cleanUp()
import os
import sys
here = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
from repoze.bfg.interfaces import ISettings
self.settings = settings = karltesting.DummySettings(
syslog_view=os.path.join(here, 'test.log'),
syslog_view_instances=['org1', 'org2'],
)
testing.registerUtility(settings, ISettings)
from karl.views.admin import syslog_view
self.fut = syslog_view
def tearDown(self):
cleanUp()
def test_no_filter(self):
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer('templates/admin/syslog.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.entries), 4)
self.assertEqual(renderer.instance, '_any')
self.failUnless(renderer.entries[0].startswith('Dec 26 11:15:23'))
def test_filter_any(self):
request = testing.DummyRequest(params={
'instance': '_any',
})
renderer = testing.registerDummyRenderer('templates/admin/syslog.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.entries), 4)
self.assertEqual(renderer.instances, ['org1', 'org2'])
self.assertEqual(renderer.instance, '_any')
self.failUnless(renderer.entries[0].startswith('Dec 26 11:15:23'))
def test_filter_org1(self):
request = testing.DummyRequest(params={
'instance': 'org1',
})
renderer = testing.registerDummyRenderer('templates/admin/syslog.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.entries), 2)
self.assertEqual(renderer.instances, ['org1', 'org2'])
self.assertEqual(renderer.instance, 'org1')
self.failUnless(renderer.entries[0].startswith('Dec 26 11:14:23'))
def test_single_digit_day_with_leading_space(self):
self.settings['syslog_view_instances'] = ['org1', 'org2', 'org3']
request = testing.DummyRequest(params={
'instance': 'org3',
})
renderer = testing.registerDummyRenderer('templates/admin/syslog.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.entries), 1)
self.assertEqual(renderer.instances, ['org1', 'org2', 'org3'])
self.assertEqual(renderer.instance, 'org3')
self.failUnless(renderer.entries[0].startswith('Feb 2 11:15:23'))
class TestLogsView(unittest.TestCase):
def setUp(self):
cleanUp()
import os
import sys
here = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
from repoze.bfg.interfaces import ISettings
self.logs = [os.path.join(here, 'test.log'),
os.path.join(here, 'test_admin.py')]
settings = karltesting.DummySettings(
logs_view=self.logs
)
testing.registerUtility(settings, ISettings)
from karl.views.admin import logs_view
self.fut = logs_view
def tearDown(self):
cleanUp()
def test_no_log(self):
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer('templates/admin/log.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.logs), 2)
self.assertEqual(renderer.log, None)
self.assertEqual(len(renderer.lines), 0)
def test_view_log(self):
request = testing.DummyRequest(params={
'log': self.logs[0]
})
renderer = testing.registerDummyRenderer('templates/admin/log.pt')
response = self.fut(None, request)
self.assertEqual(renderer.logs, self.logs)
self.assertEqual(renderer.log, self.logs[0])
self.assertEqual(len(renderer.lines), 6)
def test_one_log(self):
del self.logs[1]
request = testing.DummyRequest()
renderer = testing.registerDummyRenderer('templates/admin/log.pt')
response = self.fut(None, request)
self.assertEqual(len(renderer.logs), 1)
self.assertEqual(renderer.log, self.logs[0])
self.assertEqual(len(renderer.lines), 6)
def test_protect_arbitrary_files(self):
request = testing.DummyRequest(params={
'log': self.logs[0]
})
self.logs[0] = 'foo'
renderer = testing.registerDummyRenderer('templates/admin/log.pt')
response = self.fut(None, request)
self.assertEqual(renderer.log, None)
self.assertEqual(len(renderer.lines), 0)
class TestUploadUsersView(unittest.TestCase):
def setUp(self):
cleanUp()
site = testing.DummyModel()
profiles = site['profiles'] = testing.DummyModel()
encrypt = lambda x: 'sha:' + x
users = site.users = karltesting.DummyUsers(encrypt=encrypt)
self.site = site
self.renderer = testing.registerDummyRenderer(
'templates/admin/upload_users_csv.pt'
)
from repoze.lemonade.testing import registerContentFactory
from karl.models.interfaces import IProfile
registerContentFactory(testing.DummyModel, IProfile)
from repoze.workflow.testing import registerDummyWorkflow
registerDummyWorkflow('security')
def tearDown(self):
cleanUp()
def _call_fut(self, context, request):
from karl.views.admin import UploadUsersView
return UploadUsersView(context, request)()
def _file_upload(self, fname):
import os, sys
here = os.path.dirname(sys.modules[__name__].__file__)
return DummyUpload(None, path=os.path.join(here, fname))
def test_render_form(self):
request = testing.DummyRequest()
response = self._call_fut(self.site, request)
self.assertEqual(response.status_int, 200)
self.failUnless(hasattr(self.renderer, 'menu'))
self.failUnless(hasattr(self.renderer, 'required_fields'))
self.failUnless(hasattr(self.renderer, 'allowed_fields'))
def test_submit_ok(self):
request = testing.DummyRequest({
'csv': self._file_upload('test_users1.csv'),
})
response = self._call_fut(self.site, request)
self.assertEqual(response.status_int, 200)
api = self.renderer.api
self.assertEqual(api.error_message, None)
self.assertEqual(api.status_message, "Created 2 users.")
users = self.site.users
self.assertEqual(users.get_by_id('user1'), {
'id': 'user1',
'login': 'hello sir',
'password': 'sha:pass1234',
'groups': set(['group.KarlAdmin', 'group.community.moderator']),
})
self.assertEqual(users.get_by_id('user2'), {
'id': 'user2',
'login': 'user2',
'password': 'sha:pass1234',
'groups': set(),
})
profile = self.site['profiles']['user1']
self.assertEqual(profile.email, 'user1@example.com')
self.assertEqual(profile.firstname, 'User')
self.assertEqual(profile.lastname, 'One')
self.assertEqual(profile.phone, '(212) 555-1212')
self.assertEqual(profile.extension, 'x33')
self.assertEqual(profile.department, 'Homeland Ambiguity')
self.assertEqual(profile.position, 'High Sheriff')
self.assertEqual(profile.organization, 'Not much')
self.assertEqual(profile.location, 'Down yonder')
self.assertEqual(profile.country, 'US and A')
self.assertEqual(profile.website, 'http://example.com')
self.assertEqual(profile.languages, 'Turkish, Ebonics')
self.assertEqual(profile.office, '1234')
self.assertEqual(profile.room_no, '12')
self.assertEqual(profile.biography, 'Born. Not dead yet.')
self.assertEqual(profile.home_path, '/offices/ha')
profile = self.site['profiles']['user2']
self.assertEqual(profile.email, 'user2@example.com')
self.assertEqual(profile.firstname, 'User')
self.assertEqual(profile.lastname, 'Two')
self.assertEqual(profile.phone, '(212) 555-1212')
self.assertEqual(profile.extension, 'x34')
self.assertEqual(profile.department, 'Homeland Ambiguity')
self.assertEqual(profile.position, 'High Sheriff')
self.assertEqual(profile.organization, 'A little')
self.assertEqual(profile.location, 'Over there')
self.assertEqual(profile.country, 'US and A')
self.assertEqual(profile.website, 'http://example.com')
self.assertEqual(profile.languages, 'Magyar, Klingon')
self.assertEqual(profile.office, '4321')
self.assertEqual(profile.room_no, '21')
self.assertEqual(profile.biography, '')
self.assertEqual(profile.home_path, '')
def test_password_encrypted(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","sha_password"\n'
'"user1","User","One","test@example.com","pass1234"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message, None)
self.assertEqual(api.status_message, "Created 1 users.")
users = self.site.users
self.assertEqual(users.get_by_id('user1')['password'], 'pass1234')
def test_row_too_long(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","sha_password"\n'
'"user1","User","One","test@example.com","pass1234","foo"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message,
'Malformed CSV: line 2 does not match header.')
self.assertEqual(api.status_message, None)
def test_row_too_short(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","sha_password"\n'
'"user1","User","One","test@example.com"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message,
'Malformed CSV: line 2 does not match header.')
self.assertEqual(api.status_message, None)
    def test_unknown_field(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","sha_password","wut"\n'
'"user1","User","One","test@example.com","pass1234","wut"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message,
'Unrecognized field: wut')
self.assertEqual(api.status_message, None)
def test_missing_required_field(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","lastname","email","sha_password"\n'
'"user1","One","test@example.com","pass1234"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message,
'Missing required field: firstname')
self.assertEqual(api.status_message, None)
def test_missing_password_field(self):
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email"\n'
'"user1","User","One","test@example.com"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message,
'Must supply either password or sha_password field.')
self.assertEqual(api.status_message, None)
def test_skip_existing_user_in_users(self):
self.site.users.add('user1', 'user1', 'password', set())
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","password"\n'
'"user1","User","One","test@example.com","pass1234"\n'
'"user2","User","Two","test@example.com","pass1234"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message, None)
self.assertEqual(api.status_message,
'Skipping user: user1: User already exists.\n'
'Created 1 users.')
def test_skip_existing_user_in_profiles(self):
self.site['profiles']['user1'] = testing.DummyModel()
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","password"\n'
'"user1","User","One","test@example.com","pass1234"\n'
'"user2","User","Two","test@example.com","pass1234"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message, None)
self.assertEqual(api.status_message,
'Skipping user: user1: User already exists.\n'
'Created 1 users.')
def test_skip_existing_user_by_login(self):
self.site.users.add('foo', 'user1', 'password', set())
request = testing.DummyRequest({
'csv': DummyUpload(
'"username","firstname","lastname","email","password"\n'
'"user1","User","One","test@example.com","pass1234"\n'
'"user2","User","Two","test@example.com","pass1234"\n'
),
})
response = self._call_fut(self.site, request)
api = self.renderer.api
self.assertEqual(api.error_message, None)
self.assertEqual(api.status_message,
'Skipping user: user1: User already exists with login: user1\n'
'Created 1 users.')
class TestErrorMonitorView(unittest.TestCase):
def setUp(self):
cleanUp()
import tempfile
self.tmpdir = tmpdir = tempfile.mkdtemp('karl_test')
self.site = testing.DummyModel()
from repoze.bfg.interfaces import ISettings
settings = karltesting.DummySettings(
error_monitor_dir=tmpdir,
error_monitor_subsystems=["blonde", "red", "head"],
)
testing.registerUtility(settings, ISettings)
def log_error(self, subsystem, message):
import os
with open(os.path.join(self.tmpdir, subsystem), 'ab') as f:
print >>f, 'ENTRY'
print >>f, message
def tearDown(self):
cleanUp()
import shutil
shutil.rmtree(self.tmpdir)
def call_fut(self):
from karl.views.admin import error_monitor_view
request = testing.DummyRequest()
return error_monitor_view(self.site, request)
def test_all_ok(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor.pt'
)
self.call_fut()
self.assertEqual(renderer.subsystems, ["blonde", "red", "head"])
self.assertEqual(renderer.states,
{"blonde": [], "red": [], "head": []})
self.assertEqual(renderer.urls['blonde'],
"http://example.com/error_monitor_subsystem.html?subsystem=blonde")
def test_bad_head(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor.pt'
)
self.log_error('head', 'Testing...')
self.call_fut()
self.assertEqual(renderer.states,
{"blonde": [], "red": [], "head": ['Testing...']})
class TestErrorMonitorSubsystemView(unittest.TestCase):
def setUp(self):
cleanUp()
import tempfile
self.tmpdir = tmpdir = tempfile.mkdtemp('karl_test')
self.site = testing.DummyModel()
from repoze.bfg.interfaces import ISettings
settings = karltesting.DummySettings(
error_monitor_dir=tmpdir,
error_monitor_subsystems=["blonde", "red", "head"],
)
testing.registerUtility(settings, ISettings)
def log_error(self, subsystem, message):
import os
with open(os.path.join(self.tmpdir, subsystem), 'ab') as f:
print >>f, 'ENTRY'
print >>f, message
def tearDown(self):
cleanUp()
import shutil
shutil.rmtree(self.tmpdir)
def call_fut(self, subsystem=None):
from karl.views.admin import error_monitor_subsystem_view
request = testing.DummyRequest(params={})
if subsystem is not None:
request.params['subsystem'] = subsystem
return error_monitor_subsystem_view(self.site, request)
def test_no_subsystem(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor_subsystem.pt'
)
from repoze.bfg.exceptions import NotFound
self.assertRaises(NotFound, self.call_fut)
def test_no_errors(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor_subsystem.pt'
)
self.call_fut('red')
self.assertEqual(renderer.entries, [])
def test_bad_head(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor_subsystem.pt'
)
self.log_error('head', u'fo\xf2'.encode('utf-8'))
self.log_error('head', 'bar')
self.call_fut('head')
self.assertEqual(renderer.entries, [u'fo\xf2', 'bar'])
def test_bad_head_bad_characters(self):
renderer = testing.registerDummyRenderer(
'templates/admin/error_monitor_subsystem.pt'
)
self.log_error('head', 'fo\x92')
self.log_error('head', 'bar')
self.call_fut('head')
self.assertEqual(renderer.entries, [u'fo\x92', 'bar'])
class TestErrorMonitorStatusView(unittest.TestCase):
def setUp(self):
cleanUp()
import tempfile
self.tmpdir = tmpdir = tempfile.mkdtemp('karl_test')
self.site = testing.DummyModel()
from repoze.bfg.interfaces import ISettings
settings = karltesting.DummySettings(
error_monitor_dir=tmpdir,
error_monitor_subsystems=["blonde", "red", "head"],
)
testing.registerUtility(settings, ISettings)
def log_error(self, subsystem, message):
import os
with open(os.path.join(self.tmpdir, subsystem), 'ab') as f:
print >>f, 'ENTRY'
print >>f, message
def tearDown(self):
cleanUp()
import shutil
shutil.rmtree(self.tmpdir)
def call_fut(self):
from karl.views.admin import error_monitor_status_view
request = testing.DummyRequest()
return error_monitor_status_view(self.site, request)
def test_all_ok(self):
body = self.call_fut().body
self.assertEqual(body, "blonde: OK\n"
"red: OK\n"
"head: OK\n")
def test_bad_head(self):
self.log_error('head', 'foo')
body = self.call_fut().body
self.assertEqual(body, "blonde: OK\n"
"red: OK\n"
"head: ERROR\n")
class DummyCatalogSearch(object):
def __init__(self):
self._results = []
self.calls = []
def add_result(self, result):
self._results.append(result)
def __call__(self, **kw):
self.calls.append(kw)
if self._results:
result = self._results.pop(0)
count = len(result)
def resolver(i):
return result[i]
return count, xrange(count), resolver
return 0, (), None
class DummyModel(testing.DummyModel):
deactivated = False
def _p_deactivate(self):
self.deactivated = True
class DummyUpload(object):
type = 'text/plain'
filename = 'users.csv'
def __init__(self, data, path=None):
if path is not None:
self.file = open(path, "rb")
else:
from cStringIO import StringIO
self.file = StringIO(data)
|
boothead/karl
|
karl/views/tests/test_admin.py
|
Python
|
gpl-2.0
| 40,372
|
[
"exciting"
] |
c511520e24476b8a6c09717b0f00f717a54548fa7741e560102d27cc9e05f7ea
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from jinja2 import BaseLoader, Environment, FileSystemLoader
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import build_collection, install_collections, publish_collection, \
validate_collection_name
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
# Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
idx = 2 if args[1].startswith('-v') else 1
args.insert(idx, 'role')
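            # e.g. "ansible-galaxy install geerlingguy.nginx" is rewritten to
            # "ansible-galaxy role install geerlingguy.nginx" before parsing.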
self.api_servers = []
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
        ''' create an options parser for bin/ansible-galaxy '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to '
'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_login_options(role_parser, parents=[common])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each role installed in the roles_path.')
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('role', help='Role', nargs='?', metavar='role')
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_login_options(self, parser, parents=None):
login_parser = parser.add_parser('login', parents=parents,
help="Login to api.github.com server in order to use ansible-galaxy role sub "
"command such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path', required=True,
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
else:
install_parser.add_argument('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported.')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
                                         help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
                                  help='The path in which the collection is built. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required):
return {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
}
server_def = [('url', True), ('username', False), ('password', False), ('token', False)]
config_servers = []
for server_key in (C.GALAXY_SERVER_LIST or []):
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
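            # Illustrative ansible.cfg shape only (the server name is made up):
            #   [galaxy]
            #   server_list = my_galaxy
            #   [galaxy_server.my_galaxy]
            #   url = https://galaxy.example.com/api/
            #   token = <api token>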
config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
server_options = C.config.get_plugin_options('galaxy_server', server_key)
token_val = server_options['token'] or NoTokenSentinel
server_options['token'] = GalaxyToken(token=token_val)
config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
if cmd_server:
            # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token))
context.CLIARGS['func']()
@property
def api(self):
return self.api_servers[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True):
"""
        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to the name from Galaxy or the name of the repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
        :return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml.safe_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if requirements_file is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles', []):
requirements['roles'] += parse_role_req(role_req)
for collection_req in file_requirements.get('collections', []):
if isinstance(collection_req, dict):
req_name = collection_req.get('name', None)
if req_name is None:
raise AnsibleError("Collections requirement entry should contain the key name.")
req_version = collection_req.get('version', '*')
req_source = collection_req.get('source', None)
if req_source:
# Try and match up the requirement source with our list of Galaxy API servers defined in the
# config, otherwise create a server with that URL without any auth.
req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source))
requirements['collections'].append((req_name, req_version, req_source))
else:
requirements['collections'].append((collection_req, '*', None))
return requirements
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
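        # Illustrative input/output for the filter defined below:
        #   "See L(Galaxy docs, https://galaxy.ansible.com) and C(true)"
        #   -> "# See Galaxy docs <https://galaxy.ansible.com> and 'true'"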
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
def to_yaml(v):
return yaml.safe_dump(v, default_flow_style=False).rstrip()
env = Environment(loader=BaseLoader)
env.filters['comment_ify'] = comment_ify
env.filters['to_yaml'] = to_yaml
template = env.from_string(meta_template)
meta_value = template.render({'required_config': required_config, 'optional_config': optional_config})
return meta_value
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
))
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
))
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
template_env = Environment(loader=FileSystemLoader(obj_skeleton))
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)['collections']
else:
requirements = []
for collection_input in collections:
name, dummy, requirement = collection_input.partition(':')
requirements.append((name, requirement or '*', None))
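                    # e.g. "my_ns.my_collection:1.2.0" -> ("my_ns.my_collection", "1.2.0", None)
                    # and a bare "my_ns.my_collection" -> ("my_ns.my_collection", "*", None)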
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
if os.path.split(output_path)[1] != 'ansible_collections':
output_path = os.path.join(output_path, 'ansible_collections')
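            # e.g. "-p ./collections" ends up installing under
            # "./collections/ansible_collections/<namespace>/<name>" (illustrative).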
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps)
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
            # the user needs to specify either --role-file or a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
if not (role_file.endswith('.yaml') or role_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
roles_left = self._parse_requirements_file(role_file)['roles']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
                                    display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['role']:
# show the requested role, if it exists
name = context.CLIARGS['role']
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
amenonsen/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 57,910
|
[
"Galaxy"
] |
056773abea2cbf4af1a3c21c11341729205ee1d35587cac1204b0c50eb09b70e
|
#
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
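# Overview: this generator reads a QAPI schema from stdin (parse_schema() at the
# bottom of the file) and, for the default "sync" dispatch type, emits the
# generated <prefix>qmp-marshal.c and <prefix>qmp-commands.h into --output-dir.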
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
def generate_command_decl(name, args, ret_type):
arglist=""
for argname, argtype, optional, structured in parse_args(args):
argtype = c_type(argtype)
if argtype == "char *":
argtype = "const char *"
if optional:
arglist += "bool has_%s, " % c_var(argname)
arglist += "%s %s, " % (argtype, c_var(argname))
return mcgen('''
%(ret_type)s qmp_%(name)s(%(args)sError **errp);
''',
ret_type=c_type(ret_type), name=c_var(name), args=arglist).strip()
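# Illustrative only (assuming the usual c_type()/c_var() mappings from qapi.py):
# a schema entry {'command': 'block-passwd', 'data': {'device': 'str', 'password': 'str'}}
# yields roughly:
#   void qmp_block_passwd(const char *device, const char *password, Error **errp);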
def gen_sync_call(name, args, ret_type, indent=0):
ret = ""
arglist=""
retval=""
if ret_type:
retval = "retval = "
for argname, argtype, optional, structured in parse_args(args):
if optional:
arglist += "has_%s, " % c_var(argname)
arglist += "%s, " % (c_var(argname))
push_indent(indent)
ret = mcgen('''
%(retval)sqmp_%(name)s(%(args)serrp);
''',
name=c_var(name), args=arglist, retval=retval).rstrip()
if ret_type:
ret += "\n" + mcgen(''''
%(marshal_output_call)s
''',
marshal_output_call=gen_marshal_output_call(name, ret_type)).rstrip()
pop_indent(indent)
return ret.rstrip()
def gen_marshal_output_call(name, ret_type):
if not ret_type:
return ""
return "qmp_marshal_output_%s(retval, ret, errp);" % c_var(name)
def gen_visitor_output_containers_decl(ret_type):
ret = ""
push_indent()
if ret_type:
ret += mcgen('''
QmpOutputVisitor *mo;
QapiDeallocVisitor *md;
Visitor *v;
''')
pop_indent()
return ret
def gen_visitor_input_containers_decl(args):
ret = ""
push_indent()
if len(args) > 0:
ret += mcgen('''
QmpInputVisitor *mi;
QapiDeallocVisitor *md;
Visitor *v;
''')
pop_indent()
return ret.rstrip()
def gen_visitor_input_vars_decl(args):
ret = ""
push_indent()
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
bool has_%(argname)s = false;
''',
argname=c_var(argname))
if c_type(argtype).endswith("*"):
ret += mcgen('''
%(argtype)s %(argname)s = NULL;
''',
argname=c_var(argname), argtype=c_type(argtype))
else:
ret += mcgen('''
%(argtype)s %(argname)s;
''',
argname=c_var(argname), argtype=c_type(argtype))
pop_indent()
return ret.rstrip()
def gen_visitor_input_block(args, obj, dealloc=False):
ret = ""
if len(args) == 0:
return ret
push_indent()
if dealloc:
ret += mcgen('''
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
''')
else:
ret += mcgen('''
mi = qmp_input_visitor_new(%(obj)s);
v = qmp_input_get_visitor(mi);
''',
obj=obj)
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
visit_start_optional(v, &has_%(c_name)s, "%(name)s", errp);
if (has_%(c_name)s) {
''',
c_name=c_var(argname), name=argname)
push_indent()
ret += mcgen('''
visit_type_%(argtype)s(v, &%(c_name)s, "%(name)s", errp);
''',
c_name=c_var(argname), name=argname, argtype=argtype)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(v, errp);
''')
if dealloc:
ret += mcgen('''
qapi_dealloc_visitor_cleanup(md);
''')
else:
ret += mcgen('''
qmp_input_visitor_cleanup(mi);
''')
pop_indent()
return ret.rstrip()
def gen_marshal_output(name, args, ret_type):
if not ret_type:
return ""
ret = mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_ret_type)s ret_in, QObject **ret_out, Error **errp)
{
QapiDeallocVisitor *md = qapi_dealloc_visitor_new();
QmpOutputVisitor *mo = qmp_output_visitor_new();
Visitor *v;
v = qmp_output_get_visitor(mo);
visit_type_%(ret_type)s(v, &ret_in, "unused", errp);
if (!error_is_set(errp)) {
*ret_out = qmp_output_get_qobject(mo);
}
qmp_output_visitor_cleanup(mo);
v = qapi_dealloc_get_visitor(md);
visit_type_%(ret_type)s(v, &ret_in, "unused", errp);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_ret_type=c_type(ret_type), c_name=c_var(name), ret_type=ret_type)
return ret
def gen_marshal_input(name, args, ret_type):
ret = mcgen('''
static void qmp_marshal_input_%(c_name)s(QDict *args, QObject **ret, Error **errp)
{
''',
c_name=c_var(name))
if ret_type:
if c_type(ret_type).endswith("*"):
retval = " %s retval = NULL;" % c_type(ret_type)
else:
retval = " %s retval;" % c_type(ret_type)
ret += mcgen('''
%(retval)s
''',
retval=retval)
if len(args) > 0:
ret += mcgen('''
%(visitor_input_containers_decl)s
%(visitor_input_vars_decl)s
%(visitor_input_block)s
''',
visitor_input_containers_decl=gen_visitor_input_containers_decl(args),
visitor_input_vars_decl=gen_visitor_input_vars_decl(args),
visitor_input_block=gen_visitor_input_block(args, "QOBJECT(args)"))
ret += mcgen('''
if (error_is_set(errp)) {
goto out;
}
%(sync_call)s
''',
sync_call=gen_sync_call(name, args, ret_type, indent=4))
ret += mcgen('''
out:
''')
ret += mcgen('''
%(visitor_input_block_cleanup)s
return;
}
''',
visitor_input_block_cleanup=gen_visitor_input_block(args, None, dealloc=True))
return ret
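# Overall shape of a generated marshaller (illustrative, for a hypothetical
# command 'foo' taking one optional boolean and returning nothing):
#     static void qmp_marshal_input_foo(QDict *args, QObject **ret, Error **errp)
#     {
#         QmpInputVisitor *mi;
#         QapiDeallocVisitor *md;
#         Visitor *v;
#         bool has_force = false;
#         bool force;
#         ... input visitor block ...
#         if (error_is_set(errp)) {
#             goto out;
#         }
#         qmp_foo(has_force, force, errp);
#     out:
#         ... dealloc visitor block ...
#         return;
#     }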
def gen_registry(commands):
registry=""
push_indent()
for cmd in commands:
registry += mcgen('''
qmp_register_command("%(name)s", qmp_marshal_input_%(c_name)s);
''',
name=cmd['command'], c_name=c_var(cmd['command']))
pop_indent()
ret = mcgen('''
static void qmp_init_marshal(void)
{
%(registry)s
}
qapi_init(qmp_init_marshal);
''',
registry=registry.rstrip())
return ret
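# The registry generated above ends up as something like (command names hypothetical):
#     static void qmp_init_marshal(void)
#     {
#         qmp_register_command("query-name", qmp_marshal_input_query_name);
#         qmp_register_command("stop", qmp_marshal_input_stop);
#     }
#     qapi_init(qmp_init_marshal);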
def gen_command_decl_prologue(header, guard, prefix=""):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI function prototypes
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "%(prefix)sqapi-types.h"
#include "error.h"
''',
                header=header, guard=guard, prefix=prefix)
return ret
def gen_command_def_prologue(prefix="", proxy=False):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QMP->QAPI command dispatch
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qemu-objects.h"
#include "qapi/qmp-core.h"
#include "qapi/qapi-visit-core.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-input-visitor.h"
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''',
prefix=prefix)
if not proxy:
ret += '#include "%sqmp-commands.h"' % prefix
return ret + "\n"
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "p:o:", ["prefix=", "output-dir=", "type="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
dispatch_type = "sync"
c_file = 'qmp-marshal.c'
h_file = 'qmp-commands.h'
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-t", "--type"):
dispatch_type = a
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
exprs = parse_schema(sys.stdin)
commands = filter(lambda expr: expr.has_key('command'), exprs)
if dispatch_type == "sync":
fdecl = open(h_file, 'w')
fdef = open(c_file, 'w')
ret = gen_command_decl_prologue(header=basename(h_file), guard=guardname(h_file), prefix=prefix)
fdecl.write(ret)
ret = gen_command_def_prologue(prefix=prefix)
fdef.write(ret)
for cmd in commands:
arglist = []
ret_type = None
if cmd.has_key('data'):
arglist = cmd['data']
if cmd.has_key('returns'):
ret_type = cmd['returns']
ret = generate_command_decl(cmd['command'], arglist, ret_type) + "\n"
fdecl.write(ret)
if ret_type:
ret = gen_marshal_output(cmd['command'], arglist, ret_type) + "\n"
fdef.write(ret)
ret = gen_marshal_input(cmd['command'], arglist, ret_type) + "\n"
fdef.write(ret)
    fdecl.write("\n#endif")
ret = gen_registry(commands)
fdef.write(ret)
fdef.flush()
fdef.close()
fdecl.flush()
fdecl.close()
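# Typical invocation (a sketch based on the option parsing and stdin reading above;
# the file names and prefix are hypothetical):
#     python scripts/qapi-commands.py -o qapi-generated -p "test-" < qapi-schema.json
# which would write qapi-generated/test-qmp-marshal.c and qapi-generated/test-qmp-commands.h.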
| Aaron0927/qemu | scripts/qapi-commands.py | Python | gpl-2.0 | 9,831 | ["VisIt"] | 208835bcbb3967345e7e4fd49cd36d866cf9321bc6157310037851d65badc6ef |
import jinja2
import json
import os
import subprocess
import cclib
# For atomic number -> atomic symbol, and num valence electrons
from mendeleev import element
import openchemistry as oc
def run_calculation(geometry_file, output_file, params, scratch_dir):
# Read in the geometry from the geometry file
# This container expects the geometry file to be in .xyz format
with open(geometry_file) as f:
xyz_data = f.read()
atomic_symbols, coords = extract_xyz(xyz_data)
# Get the min/max values of each dimension for the atomic positions
ranges = get_molecule_ranges(coords)
# Make a unit cell big enough to simulate a vacuum.
    # Pad each cell length by 10 angstrom so the molecule is surrounded by vacuum.
old_center = [(x[0] + x[1]) / 2.0 for x in ranges]
lengths = [x[1] - x[0] for x in ranges]
# Add a 10 angstrom vacuum to each length, and center the molecule
lengths = [x + 10 for x in lengths]
new_center = [x / 2 for x in lengths]
translation = [new_center[i] - old_center[i] for i in range(len(lengths))]
# Translate the coords to the center
for i, coord in enumerate(coords):
coords[i] = [c + t for c, t in zip(coord, translation)]
# Set up input
project = os.path.splitext(os.path.basename(geometry_file))[0]
kinds = generate_kinds(atomic_symbols)
cell = generate_cell(lengths)
cp2k_coords = generate_coords(atomic_symbols, coords)
# Read the input parameters
task = params.get('task', 'energy')
if task == 'energy':
run_type = 'ENERGY'
elif task == 'optimize':
run_type = 'GEO_OPT'
else:
raise Exception('Invalid task: ' + str(task))
context = {
'project': project,
'run_type': run_type,
'kinds': kinds,
'cell': cell,
'coords': cp2k_coords
}
# Combine the input parameters and geometry into a concrete input file
# that can be executed by the simulation code
template_path = os.path.dirname(__file__)
jinja2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path),
trim_blocks=True)
os.makedirs(scratch_dir, exist_ok=True)
os.chdir(scratch_dir)
raw_input_file = os.path.join(scratch_dir, 'raw.in')
raw_output_file = os.path.join(scratch_dir, 'raw.out')
with open(raw_input_file, 'wb') as f:
jinja2_env.get_template('cp2k.in.j2').stream(**context).dump(f, encoding='utf8')
# Execute the code and write to output
subprocess.run(['/usr/bin/cp2k', '-i', raw_input_file,
'-o', raw_output_file])
# Convert the raw output file generated by the code execution, into the
# output format declared in the container description (cjson)
cjson = oc.Cp2kReader(raw_output_file).read()
# Save the calculation parameters in the cjson output for future reference
cjson['inputParameters'] = params
if task == 'energy' and 'atoms' not in cjson:
# Set the original xyz positions back on the cjson
data = cclib.io.ccread(geometry_file)
input_cjson = json.loads(cclib.ccwrite(data, outputtype='cjson',))
cjson['atoms'] = input_cjson['atoms']
with open(output_file, 'w') as f:
json.dump(cjson, f)
def extract_xyz(xyz_data):
# Extract atomic nums and coords from xyz data
# remove the first two lines in the xyz file
# (i.e. number of atom and optional comment)
xyz_data = xyz_data.split('\n')[2:]
# Extract the atomic nums and the coords
atomic_symbols = []
coords = []
for line in xyz_data:
line = line.strip()
if not line:
continue
vals = line.split()[:4]
if vals[0].isdigit():
num = int(vals[0])
atomic_symbols.append(element(num).symbol)
else:
atomic_symbols.append(vals[0])
coords.append([float(x) for x in vals[1:]])
return atomic_symbols, coords
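# Illustrative behaviour of extract_xyz() (hypothetical input): given the xyz text
#     3
#     water
#     O   0.000   0.000   0.117
#     H   0.000   0.757  -0.470
#     H   0.000  -0.757  -0.470
# it returns (['O', 'H', 'H'], [[0.0, 0.0, 0.117], [0.0, 0.757, -0.47], [0.0, -0.757, -0.47]]);
# lines whose first field is a number are mapped to symbols via mendeleev's element().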
def get_molecule_ranges(coords):
ranges = None
for coord in coords:
if ranges is None:
ranges = [[x, x] for x in coord]
continue
for i, val in enumerate(coord):
if val < ranges[i][0]:
ranges[i][0] = val
if val > ranges[i][1]:
ranges[i][1] = val
return ranges
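# For example (hypothetical coordinates), get_molecule_ranges([[0, 0, 1], [2, -1, 3]])
# returns [[0, 2], [-1, 0], [1, 3]]: the min/max along x, y and z respectively.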
def generate_kinds(atomic_symbols):
# Generates the &KIND block for a symbol for cp2k
template = '&KIND {{symbol}}\n'
template += ' BASIS_SET DZVP-GTH-PADE\n'
template += ' POTENTIAL GTH-PADE-q{{num_valence}}\n'
template += ' &END KIND\n'
template = jinja2.Template(template)
kinds = ''
for symbol in set(atomic_symbols):
context = {
'symbol': symbol.upper(),
'num_valence': str(element(symbol).nvalence())
}
kinds += template.render(**context) + '\n'
return kinds
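# A single rendered &KIND block would look roughly like this (hydrogen shown):
#     &KIND H
#       BASIS_SET DZVP-GTH-PADE
#       POTENTIAL GTH-PADE-q1
#       &END KIND
# where the valence-electron count comes from mendeleev's element().nvalence().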
def generate_cell(lengths):
# Generates the &CELL block for an orthorhombic cell for cp2k
template = '&CELL\n'
template += ' A {{a}} 0.000000000 0.000000000\n'
template += ' B 0.000000000 {{b}} 0.000000000\n'
template += ' C 0.000000000 0.000000000 {{c}}\n'
template += ' &END CELL\n'
context = {
'a': '%2.9f' % lengths[0],
'b': '%2.9f' % lengths[1],
'c': '%2.9f' % lengths[2]
}
template = jinja2.Template(template)
return template.render(**context)
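# Example output of generate_cell([12.0, 11.0, 13.5]) (lengths hypothetical):
#     &CELL
#       A 12.000000000 0.000000000 0.000000000
#       B 0.000000000 11.000000000 0.000000000
#       C 0.000000000 0.000000000 13.500000000
#       &END CELL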
def generate_coords(atomic_symbol, coords):
cp2k_coords = '&COORD\n'
# Generates the &COORD block for cp2k
for symbol, coord in zip(atomic_symbol, coords):
cp2k_coords += ' ' + symbol.upper() + ' '
cp2k_coords += '%2.9f %2.9f %2.9f' % (coord[0], coord[1],
coord[2])
cp2k_coords += '\n'
cp2k_coords += ' &END COORD\n'
return cp2k_coords
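# Likewise, generate_coords(['O', 'H'], [[0.0, 0.0, 0.1], [0.0, 0.7, -0.4]]) (hypothetical
# values) yields a &COORD block with one "SYMBOL x y z" line per atom, closed by &END COORD.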
| OpenChemistry/mongochemdeploy | docker/cp2k/src/run.py | Python | bsd-3-clause | 5,911 | ["CP2K", "cclib"] | 48e6a6d1fa8c9daf43b55c3c3002f79e58de3a4591f451b9a5daf72b919fdb58 |
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import ruamel.yaml as yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored, cprint
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.util.serialization import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"TaskManager",
"AbinitBuild",
"ParalHintsParser",
"ParalHints",
"AbinitTask",
"ScfTask",
"ScrTask",
"NscfTask",
"RelaxTask",
"DdkTask",
"PhononTask",
"SigmaTask",
"OpticTask",
"AnaddbTask",
]
import logging
logger = logging.getLogger(__name__)
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def lennone(PropperOrNone):
if PropperOrNone is None:
return 0
else:
return len(PropperOrNone)
def nmltostring(nml):
"""Convert a dictionary representing a Fortran namelist into a string."""
if not isinstance(nml,dict):
raise ValueError("nml should be a dict !")
curstr = ""
for key,group in nml.items():
namelist = ["&" + key]
for k, v in group.items():
if isinstance(v, list) or isinstance(v, tuple):
namelist.append(k + " = " + ",".join(map(str, v)) + ",")
elif is_string(v):
namelist.append(k + " = '" + str(v) + "',")
else:
namelist.append(k + " = " + str(v) + ",")
namelist.append("/")
curstr = curstr + "\n".join(namelist) + "\n"
return curstr
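# Illustrative example (hypothetical input): nmltostring({"control": {"ntime": 10, "title": "run"}})
# produces the Fortran namelist text:
#     &control
#     ntime = 10,
#     title = 'run',
#     /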
class TaskResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
JSON_SCHEMA["properties"] = {
"executable": {"type": "string", "required": True},
}
@classmethod
def from_node(cls, task):
"""Initialize an instance from an :class:`AbinitTask` instance."""
new = super(TaskResults, cls).from_node(task)
new.update(
executable=task.executable,
#executable_version:
#task_events=
pseudos=[p.as_dict() for p in task.input.pseudos],
#input=task.input
)
new.register_gridfs_files(
run_abi=(task.input_file.path, "t"),
run_abo=(task.output_file.path, "t"),
)
return new
class ParalConf(AttrDict):
"""
    This object stores the parameters associated with one
of the possible parallel configurations reported by ABINIT.
Essentially it is a dictionary whose values can also be accessed
as attributes. It also provides default values for selected keys
that might not be present in the ABINIT dictionary.
Example:
--- !Autoparal
info:
version: 1
autoparal: 1
max_ncpus: 108
configurations:
- tot_ncpus: 2 # Total number of CPUs
mpi_ncpus: 2 # Number of MPI processes.
omp_ncpus: 1 # Number of OMP threads (1 if not present)
mem_per_cpu: 10 # Estimated memory requirement per MPI processor in Megabytes.
efficiency: 0.4 # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
vars: { # Dictionary with the variables that should be added to the input.
varname1: varvalue1
varname2: varvalue2
}
-
...
For paral_kgb we have:
nproc npkpt npspinor npband npfft bandpp weight
108 1 1 12 9 2 0.25
108 1 1 108 1 2 27.00
96 1 1 24 4 1 1.50
84 1 1 12 7 2 0.25
"""
_DEFAULTS = {
"omp_ncpus": 1,
"mem_per_cpu": 0.0,
"vars": {}
}
def __init__(self, *args, **kwargs):
super(ParalConf, self).__init__(*args, **kwargs)
# Add default values if not already in self.
for k, v in self._DEFAULTS.items():
if k not in self:
self[k] = v
def __str__(self):
stream = StringIO()
pprint(self, stream=stream)
return stream.getvalue()
# TODO: Change name in abinit
# Remove tot_ncpus from Abinit
@property
def num_cores(self):
return self.mpi_procs * self.omp_threads
@property
def mem_per_proc(self):
return self.mem_per_cpu
@property
def mpi_procs(self):
return self.mpi_ncpus
@property
def omp_threads(self):
return self.omp_ncpus
@property
def speedup(self):
"""Estimated speedup reported by ABINIT."""
return self.efficiency * self.num_cores
@property
def tot_mem(self):
"""Estimated total memory in Mbs (computed from mem_per_proc)"""
return self.mem_per_proc * self.mpi_procs
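# A minimal sketch of how a single autoparal configuration maps onto ParalConf
# (values hypothetical): ParalConf(tot_ncpus=2, mpi_ncpus=2, efficiency=0.4)
# exposes conf.mpi_procs == 2, conf.omp_threads == 1 (the default),
# conf.num_cores == 2 and conf.speedup == 0.8.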
class ParalHintsError(Exception):
"""Base error class for `ParalHints`."""
class ParalHintsParser(object):
Error = ParalHintsError
def __init__(self):
# Used to push error strings.
self._errors = collections.deque(maxlen=100)
def add_error(self, errmsg):
self._errors.append(errmsg)
def parse(self, filename):
"""
Read the `AutoParal` section (YAML format) from filename.
Assumes the file contains only one section.
"""
with abiinspect.YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag("!Autoparal")
try:
d = yaml.safe_load(doc.text_notag)
return ParalHints(info=d["info"], confs=d["configurations"])
except:
import traceback
sexc = traceback.format_exc()
err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
self.add_error(err_msg)
logger.critical(err_msg)
raise self.Error(err_msg)
class ParalHints(collections.Iterable):
"""
Iterable with the hints for the parallel execution reported by ABINIT.
"""
Error = ParalHintsError
def __init__(self, info, confs):
self.info = info
self._confs = [ParalConf(**d) for d in confs]
@classmethod
def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
"""
Build a list of Parallel configurations from two lists
containing the number of MPI processes and the number of OpenMP threads
i.e. product(mpi_procs, omp_threads).
        The configurations have parallel efficiency set to 1.0 and no input variables.
Mainly used for preparing benchmarks.
"""
info = {}
        confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
for p, t in product(mpi_procs, omp_threads)]
return cls(info, confs)
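    # Sketch (hypothetical values): ParalHints.from_mpi_omp_lists([1, 2, 4], [1, 2])
    # builds 6 ParalConf entries, one per (mpi, omp) pair of the Cartesian product,
    # each with efficiency 1.0 and no extra input variables.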
def __getitem__(self, key):
return self._confs[key]
def __iter__(self):
return self._confs.__iter__()
def __len__(self):
return self._confs.__len__()
def __repr__(self):
return "\n".join(str(conf) for conf in self)
def __str__(self):
return repr(self)
@lazy_property
def max_cores(self):
"""Maximum number of cores."""
return max(c.mpi_procs * c.omp_threads for c in self)
@lazy_property
def max_mem_per_proc(self):
"""Maximum memory per MPI process."""
return max(c.mem_per_proc for c in self)
@lazy_property
def max_speedup(self):
"""Maximum speedup."""
return max(c.speedup for c in self)
@lazy_property
def max_efficiency(self):
"""Maximum parallel efficiency."""
return max(c.efficiency for c in self)
@pmg_serialize
def as_dict(self, **kwargs):
return {"info": self.info, "confs": self._confs}
@classmethod
def from_dict(cls, d):
return cls(info=d["info"], confs=d["confs"])
def copy(self):
"""Shallow copy of self."""
return copy.copy(self)
def select_with_condition(self, condition, key=None):
"""
Remove all the configurations that do not satisfy the given condition.
Args:
condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars
"""
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
# Select the object on which condition is applied
obj = conf if key is None else AttrDict(conf[key])
add_it = condition(obj=obj)
#if key is "vars": print("conf", conf, "added:", add_it)
if add_it: new_confs.append(conf)
self._confs = new_confs
    def sort_by_efficiency(self, reverse=True):
        """Sort the configurations in place. Items with the highest efficiency come first."""
self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
return self
    def sort_by_speedup(self, reverse=True):
        """Sort the configurations in place. Items with the highest speedup come first."""
self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
return self
    def sort_by_mem_per_proc(self, reverse=False):
        """Sort the configurations in place. Items with the lowest memory per proc come first."""
# Avoid sorting if mem_per_cpu is not available.
if any(c.mem_per_proc > 0.0 for c in self):
self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
return self
def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
# Mapping property --> options passed to sparse_histogram
opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(memory=1024))
#opts = dict(zip(priorities, bin_widths))
opt_confs = self._confs
for priority in priorities:
histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
pos = 0 if priority == "mem_per_proc" else -1
opt_confs = histogram.values[pos]
#histogram.plot(show=True, savefig="hello.pdf")
return self.__class__(info=self.info, confs=opt_confs)
#def histogram_efficiency(self, step=0.1):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
# return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)
#def histogram_speedup(self, step=1.0):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def histogram_memory(self, step=1024):
# """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def filter(self, qadapter):
# """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
# new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
# return self.__class__(info=self.info, confs=new_confs)
def get_ordered_with_policy(self, policy, max_ncpus):
"""
Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
"""
# Build new list since we are gonna change the object in place.
hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])
# First select the configurations satisfying the condition specified by the user (if any)
bkp_hints = hints.copy()
if policy.condition:
logger.info("Applying condition %s" % str(policy.condition))
hints.select_with_condition(policy.condition)
            # Undo the change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.condition")
# Now filter the configurations depending on the values in vars
bkp_hints = hints.copy()
if policy.vars_condition:
logger.info("Applying vars_condition %s" % str(policy.vars_condition))
hints.select_with_condition(policy.vars_condition, key="vars")
            # Undo the change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.vars_condition")
if len(policy.autoparal_priorities) == 1:
# Example: hints.sort_by_speedup()
if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
hints.sort_by_speedup()
else:
hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
if len(hints) == 0: raise ValueError("len(hints) == 0")
#TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
#if len(hints) > 1:
# hints.select_with_condition(dict(num_cores={"$eq": 1)))
        # Return the final (ordered) list of configurations (best first).
return hints
class TaskPolicy(object):
"""
This object stores the parameters used by the :class:`TaskManager` to
create the submission script and/or to modify the ABINIT variables
governing the parallel execution. A `TaskPolicy` object contains
a set of variables that specify the launcher, as well as the options
and the conditions used to select the optimal configuration for the parallel run
"""
@classmethod
def as_policy(cls, obj):
"""
Converts an object obj into a `:class:`TaskPolicy. Accepts:
* None
* TaskPolicy
* dict-like object
"""
if obj is None:
# Use default policy.
return TaskPolicy()
else:
if isinstance(obj, cls):
return obj
elif isinstance(obj, collections.Mapping):
return cls(**obj)
else:
raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))
@classmethod
def autodoc(cls):
return """
autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
# DEFAULT: empty i.e. ignored.
vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
# (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
# the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
# string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
precedence: # Under development.
autoparal_priorities: # Under development.
"""
def __init__(self, **kwargs):
"""
See autodoc
"""
self.autoparal = kwargs.pop("autoparal", 1)
self.condition = Condition(kwargs.pop("condition", {}))
self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
self.precedence = kwargs.pop("precedence", "autoparal_conf")
self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
#self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiecy", "memory"]
# TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))
if kwargs:
raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))
# Consistency check.
if self.precedence not in ("qadapter", "autoparal_conf"):
raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")
def __str__(self):
lines = []
app = lines.append
for k, v in self.__dict__.items():
if k.startswith("_"): continue
app("%s: %s" % (k, v))
return "\n".join(lines)
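# A minimal sketch of constructing a policy by hand (keys as documented in autodoc above,
# values hypothetical):
#     policy = TaskPolicy(autoparal=1, autoparal_priorities=["speedup"],
#                         condition={"efficiency": {"$gte": 0.7}})
# Unknown keywords raise ValueError, and precedence must be "qadapter" or "autoparal_conf".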
class ManagerIncreaseError(Exception):
"""
Exception raised by the manager if the increase request failed
"""
class FixQueueCriticalError(Exception):
"""
    Error raised when an error could not be fixed at the task level.
"""
# Global variable used to store the task manager returned by `from_user_config`.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
"""
A `TaskManager` is responsible for the generation of the job script and the submission
of the task, as well as for the specification of the parameters passed to the resource manager
(e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
"""
YAML_FILE = "manager.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}
@classmethod
def autodoc(cls):
from .db import DBConnector
s = """
# TaskManager configuration file (YAML Format)
policy:
# Dictionary with options used to control the execution of the tasks.
qadapters:
# List of qadapters objects (mandatory)
- # qadapter_1
- # qadapter_2
db_connector:
# Connection to MongoDB database (optional)
batch_adapter:
# Adapter used to submit flows with batch script. (optional)
##########################################
# Individual entries are documented below:
##########################################
"""
s += "policy: " + TaskPolicy.autodoc() + "\n"
s += "qadapter: " + QueueAdapter.autodoc() + "\n"
#s += "db_connector: " + DBConnector.autodoc()
return s
@classmethod
def from_user_config(cls):
"""
        Initialize the :class:`TaskManager` from the YAML file 'manager.yml'.
Search first in the working directory and then in the abipy configuration directory.
Raises:
RuntimeError if file is not found.
"""
global _USER_CONFIG_TASKMANAGER
if _USER_CONFIG_TASKMANAGER is not None:
return _USER_CONFIG_TASKMANAGER
# Try in the current directory then in user configuration directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if not os.path.exists(path):
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if not os.path.exists(path):
raise RuntimeError(colored(
"\nCannot locate %s neither in current directory nor in %s\n"
"!!! PLEASE READ THIS: !!!\n"
"To use abipy to run jobs this file must be present\n"
"It provides a description of the cluster/computer you are running on\n"
"Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))
_USER_CONFIG_TASKMANAGER = cls.from_file(path)
return _USER_CONFIG_TASKMANAGER
@classmethod
def from_file(cls, filename):
"""Read the configuration parameters from the Yaml file filename."""
try:
with open(filename, "r") as fh:
return cls.from_dict(yaml.safe_load(fh))
except Exception as exc:
print("Error while reading TaskManager parameters from %s\n" % filename)
raise
@classmethod
def from_string(cls, s):
"""Create an instance from string s containing a YAML dictionary."""
return cls.from_dict(yaml.safe_load(s))
@classmethod
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))
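    # Sketch of the accepted inputs (paths and strings are hypothetical):
    #     TaskManager.as_manager(None)                 -> read manager.yml from cwd or ~/.abinit/abipy
    #     TaskManager.as_manager("manager.yml")        -> parse the file if it exists
    #     TaskManager.as_manager("qadapters: ...")     -> parse the string as YAML
    #     TaskManager.as_manager({"qadapters": [...]}) -> build via from_dict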
@classmethod
def from_dict(cls, d):
"""Create an instance from a dictionary."""
return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})
@pmg_serialize
def as_dict(self):
return self._kwargs
def __init__(self, **kwargs):
"""
Args:
policy:None
qadapters:List of qadapters in YAML format
db_connector:Dictionary with data used to connect to the database (optional)
"""
# Keep a copy of kwargs
self._kwargs = copy.deepcopy(kwargs)
self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))
# Initialize database connector (if specified)
self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))
        # Build the list of QAdapters. Neglect an entry if its priority == 0 or it has "enabled: no".
qads = []
for d in kwargs.pop("qadapters"):
            if not d.get("enabled", True): continue
qad = make_qadapter(**d)
if qad.priority > 0:
qads.append(qad)
elif qad.priority < 0:
raise ValueError("qadapter cannot have negative priority:\n %s" % qad)
if not qads:
            raise ValueError("Received empty list of qadapters")
#if len(qads) != 1:
# raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")
# Order qdapters according to priority.
qads = sorted(qads, key=lambda q: q.priority)
priorities = [q.priority for q in qads]
if len(priorities) != len(set(priorities)):
raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")
self._qads, self._qadpos = tuple(qads), 0
# Initialize the qadapter for batch script submission.
d = kwargs.pop("batch_adapter", None)
self.batch_adapter = None
if d: self.batch_adapter = make_qadapter(**d)
#print("batch_adapter", self.batch_adapter)
if kwargs:
raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))
@lazy_property
def abinit_build(self):
""":class:`AbinitBuild` object with Abinit version and options used to build the code"""
return AbinitBuild(manager=self)
def to_shell_manager(self, mpi_procs=1):
"""
        Returns a new `TaskManager` with the same parameters as self but replaces the :class:`QueueAdapter`
with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
"""
my_kwargs = copy.deepcopy(self._kwargs)
my_kwargs["policy"] = TaskPolicy(autoparal=0)
# On BlueGene we need at least two qadapters.
# One for running jobs on the computing nodes and another one
# for running small jobs on the fronted. These two qadapters
        # will have different environments and different executables.
# If None of the q-adapters has qtype==shell, we change qtype to shell
# and we return a new Manager for sequential jobs with the same parameters as self.
# If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
# when we build the new Manager.
has_shell_qad = False
for d in my_kwargs["qadapters"]:
if d["queue"]["qtype"] == "shell": has_shell_qad = True
if has_shell_qad:
my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]
for d in my_kwargs["qadapters"]:
d["queue"]["qtype"] = "shell"
d["limits"]["min_cores"] = mpi_procs
d["limits"]["max_cores"] = mpi_procs
# If shell_runner is specified, replace mpi_runner with shell_runner
# in the script used to run jobs on the frontend.
            # On some machines based on Slurm, indeed, mpirun/mpiexec is not available
# and jobs should be executed with `srun -n4 exec` when running on the computing nodes
# or with `exec` when running in sequential on the frontend.
if "job" in d and "shell_runner" in d["job"]:
shell_runner = d["job"]["shell_runner"]
#print("shell_runner:", shell_runner, type(shell_runner))
if not shell_runner or shell_runner == "None": shell_runner = ""
d["job"]["mpi_runner"] = shell_runner
#print("shell_runner:", shell_runner)
#print(my_kwargs)
new = self.__class__(**my_kwargs)
new.set_mpi_procs(mpi_procs)
return new
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Return a new `TaskManager` in which autoparal has been disabled.
The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
Useful for generating input files for benchmarks.
"""
new = self.deepcopy()
new.policy.autoparal = 0
new.set_mpi_procs(mpi_procs)
new.set_omp_threads(omp_threads)
return new
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.qadapter.QTYPE.lower() != "shell"
@property
def qads(self):
"""List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
return self._qads
@property
def qadapter(self):
"""The qadapter used to submit jobs."""
return self._qads[self._qadpos]
def select_qadapter(self, pconfs):
"""
        Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
            :class:`ParalConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
#if qad.allocation in ["nodes", "force_nodes"]:
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
def _use_qadpos_pconf(self, qadpos, pconf):
"""
This function is called when we have accepted the :class:`ParalConf` pconf.
Returns pconf
"""
self._qadpos = qadpos
# Change the number of MPI/OMP cores.
self.set_mpi_procs(pconf.mpi_procs)
if self.has_omp: self.set_omp_threads(pconf.omp_threads)
# Set memory per proc.
#FIXME: Fixer may have changed the memory per proc and should not be resetted by ParalConf
#self.set_mem_per_proc(pconf.mem_per_proc)
return pconf
def __str__(self):
"""String representation."""
lines = []
app = lines.append
#app("[Task policy]\n%s" % str(self.policy))
for i, qad in enumerate(self.qads):
app("[Qadapter %d]\n%s" % (i, str(qad)))
app("Qadapter selected: %d" % self._qadpos)
if self.has_db:
app("[MongoDB database]:")
app(str(self.db_connector))
return "\n".join(lines)
@property
def has_db(self):
"""True if we are using MongoDB database"""
return bool(self.db_connector)
@property
def has_omp(self):
"""True if we are using OpenMP parallelization."""
return self.qadapter.has_omp
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.qadapter.num_cores
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qadapter.mpi_procs
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return self.qadapter.mem_per_proc
@property
def omp_threads(self):
"""Number of OpenMP threads"""
return self.qadapter.omp_threads
def deepcopy(self):
"""Deep copy of self."""
return copy.deepcopy(self)
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to use."""
self.qadapter.set_mpi_procs(mpi_procs)
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMp threads to use."""
self.qadapter.set_omp_threads(omp_threads)
def set_mem_per_proc(self, mem_mb):
"""Set the memory (in Megabytes) per CPU."""
self.qadapter.set_mem_per_proc(mem_mb)
@property
def max_cores(self):
"""
Maximum number of cores that can be used.
This value is mainly used in the autoparal part to get the list of possible configurations.
"""
return max(q.hint_cores for q in self.qads)
def get_njobs_in_queue(self, username=None):
"""
        Returns the number of jobs in the queue, or None if the number of jobs
        cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.qadapter.get_njobs_in_queue(username=username)
def cancel(self, job_id):
"""Cancel the job. Returns exit status."""
return self.qadapter.cancel(job_id)
def write_jobfile(self, task, **kwargs):
"""
Write the submission script. Return the path of the script
================ ============================================
kwargs Meaning
================ ============================================
exec_args List of arguments passed to task.executable.
Default: no arguments.
================ ============================================
"""
script = self.qadapter.get_script_str(
job_name=task.name,
launch_dir=task.workdir,
executable=task.executable,
qout_path=task.qout_file.path,
qerr_path=task.qerr_file.path,
stdin=task.files_file.path,
stdout=task.log_file.path,
stderr=task.stderr_file.path,
exec_args=kwargs.pop("exec_args", []),
)
# Write the script.
with open(task.job_file.path, "w") as fh:
fh.write(script)
task.job_file.chmod(0o740)
return task.job_file.path
def launch(self, task, **kwargs):
"""
Build the input files and submit the task via the :class:`Qadapter`
Args:
task: :class:`TaskObject`
Returns:
Process object.
"""
if task.status == task.S_LOCKED:
raise ValueError("You shall not submit a locked task!")
# Build the task
task.build()
# Pass information on the time limit to Abinit (we always assume ndtset == 1)
#if False and isinstance(task, AbinitTask):
if isinstance(task, AbinitTask):
args = kwargs.get("exec_args", [])
if args is None: args = []
args = args[:]
args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
kwargs["exec_args"] = args
logger.info("Will pass timelimit option to abinit %s:" % args)
# Write the submission script
script_file = self.write_jobfile(task, **kwargs)
# Submit the task and save the queue id.
try:
qjob, process = self.qadapter.submit_to_queue(script_file)
task.set_status(task.S_SUB, msg='Submitted to queue')
task.set_qjob(qjob)
return process
except self.qadapter.MaxNumLaunchesError as exc:
# TODO: Here we should try to switch to another qadapter
# 1) Find a new parallel configuration in those stored in task.pconfs
# 2) Change the input file.
# 3) Regenerate the submission script
# 4) Relaunch
task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
raise
def get_collection(self, **kwargs):
"""Return the MongoDB collection used to store the results."""
return self.db_connector.get_collection(**kwargs)
def increase_mem(self):
# OLD
# with GW calculations in mind with GW mem = 10,
# the response fuction is in memory and not distributed
# we need to increase memory if jobs fail ...
# return self.qadapter.more_mem_per_proc()
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase mem')
def increase_ncpus(self):
"""
        Increase the number of cpus: first ask the current qadapter; if that one raises a QueueAdapterError,
        switch to the next qadapter. If all fail, raise a ManagerIncreaseError.
"""
try:
self.qadapter.more_cores()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase ncpu')
def increase_resources(self):
try:
self.qadapter.more_cores()
return
except QueueAdapterError:
pass
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase resources')
def exclude_nodes(self, nodes):
try:
self.qadapter.exclude_nodes(nodes=nodes)
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to exclude nodes')
def increase_time(self):
try:
self.qadapter.more_time()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
"""
This object stores information on the options used to build Abinit
.. attribute:: info
String with build information as produced by `abinit -b`
.. attribute:: version
        Abinit version number, e.g. 8.0.1 (string)
.. attribute:: has_netcdf
True if netcdf is enabled.
.. attribute:: has_omp
True if OpenMP is enabled.
.. attribute:: has_mpi
True if MPI is enabled.
.. attribute:: has_mpiio
True if MPI-IO is supported.
"""
def __init__(self, workdir=None, manager=None):
manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
# Generate a shell script to execute `abinit -b`
stdout = os.path.join(workdir, "run.abo")
script = manager.qadapter.get_script_str(
job_name="abinit_b",
launch_dir=workdir,
executable="abinit",
qout_path=os.path.join(workdir, "queue.qout"),
qerr_path=os.path.join(workdir, "queue.qerr"),
#stdin=os.path.join(workdir, "run.files"),
stdout=stdout,
stderr=os.path.join(workdir, "run.err"),
exec_args=["-b"],
)
# Execute the script.
script_file = os.path.join(workdir, "job.sh")
with open(script_file, "wt") as fh:
fh.write(script)
qjob, process = manager.qadapter.submit_to_queue(script_file)
process.wait()
if process.returncode != 0:
logger.critical("Error while executing %s" % script_file)
print("stderr:", process.stderr.read())
print("stdout:", process.stdout.read())
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
with open(stdout, "rt") as fh:
self.info = fh.read()
# info string has the following format.
"""
=== Build Information ===
Version : 8.0.1
Build target : x86_64_darwin15.0.0_gnu5.3
Build date : 20160122
=== Compiler Suite ===
C compiler : gnu
C++ compiler : gnuApple
Fortran compiler : gnu5.3
CFLAGS : -g -O2 -mtune=native -march=native
CXXFLAGS : -g -O2 -mtune=native -march=native
FCFLAGS : -g -ffree-line-length-none
FC_LDFLAGS :
=== Optimizations ===
Debug level : basic
Optimization level : standard
Architecture : unknown_unknown
=== Multicore ===
Parallel build : yes
Parallel I/O : yes
openMP support : no
GPU support : no
=== Connectors / Fallbacks ===
Connectors on : yes
Fallbacks on : yes
DFT flavor : libxc-fallback+atompaw-fallback+wannier90-fallback
FFT flavor : none
LINALG flavor : netlib
MATH flavor : none
TIMER flavor : abinit
TRIO flavor : netcdf+etsf_io-fallback
=== Experimental features ===
Bindings : @enable_bindings@
Exports : no
GW double-precision : yes
=== Bazaar branch information ===
Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
Revision : 1226
Committed : 0
"""
self.version = "0.0.0"
self.has_netcdf = False
self.has_omp = False
self.has_mpi, self.has_mpiio = False, False
def yesno2bool(line):
ans = line.split()[-1]
return dict(yes=True, no=False)[ans]
# Parse info.
for line in self.info.splitlines():
if "Version" in line: self.version = line.split()[-1]
if "TRIO flavor" in line:
self.has_netcdf = "netcdf" in line
if "openMP support" in line: self.has_omp = yesno2bool(line)
if "Parallel build" in line: self.has_mpi = yesno2bool(line)
if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)
def __str__(self):
lines = []
app = lines.append
app("Abinit Build Information:")
app(" Abinit version: %s" % self.version)
app(" MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
app(" Netcdf: %s" % self.has_netcdf)
return "\n".join(lines)
    def version_ge(self, version_string):
        """True if the Abinit version is >= version_string."""
return self.compare_version(version_string, ">=")
def compare_version(self, version_string, op):
"""Compare Abinit version to `version_string` with operator `op`"""
from pkg_resources import parse_version
from monty.operator import operator_from_str
op = operator_from_str(op)
return op(parse_version(self.version), parse_version(version_string))
class FakeProcess(object):
"""
    This object is attached to a :class:`Task` instance if the task has not been submitted.
This trick allows us to simulate a process that is still running so that
we can safely poll task.process.
"""
def poll(self):
return None
def wait(self):
raise RuntimeError("Cannot wait a FakeProcess")
def communicate(self, input=None):
raise RuntimeError("Cannot communicate with a FakeProcess")
def kill(self):
raise RuntimeError("Cannot kill a FakeProcess")
@property
def returncode(self):
return None
class MyTimedelta(datetime.timedelta):
"""A customized version of timedelta whose __str__ method doesn't print microseconds."""
def __new__(cls, days, seconds, microseconds):
return datetime.timedelta.__new__(cls, days, seconds, microseconds)
def __str__(self):
"""Remove microseconds from timedelta default __str__"""
s = super(MyTimedelta, self).__str__()
microsec = s.find(".")
if microsec != -1: s = s[:microsec]
return s
@classmethod
def as_timedelta(cls, delta):
"""Convert delta into a MyTimedelta object."""
# Cannot monkey patch the __class__ and must pass through __new__ as the object is immutable.
if isinstance(delta, cls): return delta
return cls(delta.days, delta.seconds, delta.microseconds)
class TaskDateTimes(object):
"""
    Small object containing useful :class:`datetime.datetime` objects associated to important events.
.. attributes:
init: initialization datetime
submission: submission datetime
start: Begin of execution.
end: End of execution.
"""
def __init__(self):
self.init = datetime.datetime.now()
self.submission, self.start, self.end = None, None, None
def __str__(self):
lines = []
app = lines.append
app("Initialization done on: %s" % self.init)
if self.submission is not None: app("Submitted on: %s" % self.submission)
if self.start is not None: app("Started on: %s" % self.start)
if self.end is not None: app("Completed on: %s" % self.end)
return "\n".join(lines)
def reset(self):
"""Reinitialize the counters."""
        self.__init__()  # rebinding self would be a no-op; re-run __init__ to actually reset the timestamps
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta)
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta)
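# Sketch of the lifecycle these timestamps track (hypothetical sequence):
#     dt = TaskDateTimes()               # `init` recorded at construction time
#     dt.submission = datetime.datetime.now()
#     dt.start = datetime.datetime.now()
#     dt.end = datetime.datetime.now()
#     dt.get_runtime()        # -> MyTimedelta(end - start), microseconds hidden in str()
#     dt.get_time_inqueue()   # -> MyTimedelta(start - submission), clamped at >= 0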
class TaskError(NodeError):
"""Base Exception for :class:`Task` methods"""
class TaskRestartError(TaskError):
"""Exception raised while trying to restart the :class:`Task`."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
"""A Task is a node that performs some kind of calculation."""
# Use class attributes for TaskErrors so that we don't have to import them.
Error = TaskError
RestartError = TaskRestartError
# List of `AbinitEvent` subclasses that are tested in the check_status method.
# Subclasses should provide their own list if they need to check the converge status.
CRITICAL_EVENTS = []
# Prefixes for Abinit (input, output, temporary) files.
Prefix = collections.namedtuple("Prefix", "idata odata tdata")
pj = os.path.join
prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
del Prefix, pj
def __init__(self, input, workdir=None, manager=None, deps=None):
"""
Args:
input: :class:`AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
deps: Dictionary specifying the dependency of this node.
None means that this Task has no dependency.
"""
# Init the node
super(Task, self).__init__()
self._input = input
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
# Handle possible dependencies.
if deps:
self.add_deps(deps)
# Date-time associated to submission, start and end.
self.datetimes = TaskDateTimes()
# Count the number of restarts.
self.num_restarts = 0
self._qjob = None
self.queue_errors = []
self.abi_errors = []
        # Two flags that provide, dynamically, information on the scaling behaviour of a task. If any fixing
        # process finds non-scaling behaviour, they should be switched. If a task type is clearly not scaling,
        # they should be switched.
self.mem_scales = True
self.load_scales = True
def __getstate__(self):
"""
Return state is pickled as the contents for the instance.
In this case we just remove the process since Subprocess objects cannot be pickled.
This is the reason why we have to store the returncode in self._returncode instead
of using self.process.returncode.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_process"]}
#@check_spectator
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Files required for the execution.
self.input_file = File(os.path.join(self.workdir, "run.abi"))
self.output_file = File(os.path.join(self.workdir, "run.abo"))
self.files_file = File(os.path.join(self.workdir, "run.files"))
self.job_file = File(os.path.join(self.workdir, "job.sh"))
self.log_file = File(os.path.join(self.workdir, "run.log"))
self.stderr_file = File(os.path.join(self.workdir, "run.err"))
self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))
# This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))
# Directories with input|output|temporary data.
self.wdir = Directory(self.workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
# stderr and output file of the queue manager. Note extensions.
self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
def set_manager(self, manager):
"""Set the :class:`TaskManager` used to launch the Task."""
self.manager = manager.deepcopy()
@property
def work(self):
"""The :class:`Work` containing this `Task`."""
return self._work
def set_work(self, work):
"""Set the :class:`Work` associated to this `Task`."""
if not hasattr(self, "_work"):
self._work = work
else:
if self._work != work:
raise ValueError("self._work != work")
@property
def flow(self):
"""The :class:`Flow` containing this `Task`."""
return self.work.flow
@lazy_property
def pos(self):
"""The position of the task in the :class:`Flow`"""
for i, task in enumerate(self.work):
if self == task:
return self.work.pos, i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])
@property
def num_launches(self):
"""
Number of launches performed. This number includes both possible ABINIT restarts
as well as possible launches done due to errors encountered with the resource manager
or the hardware/software."""
return sum(q.num_launches for q in self.manager.qads)
@property
def input(self):
"""AbinitInput object."""
return self._input
def get_inpvar(self, varname, default=None):
"""Return the value of the ABINIT variable varname, None if not present."""
return self.input.get(varname, default)
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input file. Return dict with old values.
"""
kwargs.update(dict(*args))
old_values = {vname: self.input.get(vname) for vname in kwargs}
self.input.set_vars(**kwargs)
if kwargs or old_values:
self.history.info("Setting input variables: %s" % str(kwargs))
self.history.info("Old values: %s" % str(old_values))
return old_values
@property
def initial_structure(self):
"""Initial structure of the task."""
return self.input.structure
def make_input(self, with_header=False):
"""Construct the input file of the calculation."""
s = str(self.input)
if with_header: s = str(self) + "\n" + s
return s
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
@property
@abc.abstractmethod
def executable(self):
"""
Path to the executable associated to the task (internally stored in self._executable).
"""
def set_executable(self, executable):
"""Set the executable associate to this task."""
self._executable = executable
@property
def process(self):
try:
return self._process
except AttributeError:
# Attach a fake process so that we can poll it.
return FakeProcess()
@property
def is_completed(self):
"""True if the task has been executed."""
return self.status >= self.S_DONE
@property
def can_run(self):
"""The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
all_ok = all(stat == self.S_OK for stat in self.deps_status)
return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
#@check_spectator
def cancel(self):
"""Cancel the job. Returns 1 if job was cancelled."""
if self.queue_id is None: return 0
if self.status >= self.S_DONE: return 0
exit_status = self.manager.cancel(self.queue_id)
if exit_status != 0:
logger.warning("manager.cancel returned exit_status: %s" % exit_status)
return 0
# Remove output files and reset the status.
self.history.info("Job %s cancelled by user" % self.queue_id)
self.reset()
return 1
def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Disable autoparal and force execution with `mpi_procs` MPI processes
and `omp_threads` OpenMP threads. Useful for generating benchmarks.
"""
manager = self.manager if hasattr(self, "manager") else self.flow.manager
self.manager = manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
#def set_max_ncores(self, max_ncores):
# """
# """
# manager = self.manager if hasattr(self, "manager") else self.flow.manager
# self.manager = manager.new_with_max_ncores(mpi_procs, omp_threads)
#@check_spectator
def _on_done(self):
self.fix_ofiles()
#@check_spectator
def _on_ok(self):
# Fix output file names.
self.fix_ofiles()
# Get results
results = self.on_ok()
self.finalized = True
return results
#@check_spectator
def on_ok(self):
"""
This method is called once the `Task` has reached status S_OK.
Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
#@check_spectator
def fix_ofiles(self):
"""
This method is called when the task reaches S_OK.
It changes the extension of particular output files
produced by Abinit so that the 'official' extension
is preserved e.g. out_1WF14 --> out_1WF
"""
filepaths = self.outdir.list_filepaths()
logger.info("in fix_ofiles with filepaths %s" % list(filepaths))
old2new = FilepathFixer().fix_paths(filepaths)
for old, new in old2new.items():
self.history.info("will rename old %s to new %s" % (old, new))
os.rename(old, new)
#@check_spectator
def _restart(self, submit=True):
"""
Called by restart once we have finished preparing the task for restarting.
Return:
True if task has been restarted
"""
self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())
# Increase the counter.
self.num_restarts += 1
self.history.info("Restarted, num_restarts %d" % self.num_restarts)
# Reset datetimes
self.datetimes.reset()
if submit:
# Remove the lock file
self.start_lockfile.remove()
# Relaunch the task.
fired = self.start()
if not fired: self.history.warning("Restart failed")
else:
fired = False
return fired
#@check_spectator
def restart(self):
"""
Restart the calculation. Subclasses should provide a concrete version that
performs all the actions needed for preparing the restart and then calls self._restart
to restart the task. The default implementation is empty.
Returns:
1 if job was restarted, 0 otherwise.
"""
logger.debug("Calling the **empty** restart method of the base class")
return 0
def poll(self):
"""Check if child process has terminated. Set and return returncode attribute."""
self._returncode = self.process.poll()
if self._returncode is not None:
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
try:
self.process.stderr.close()
except:
pass
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def communicate(self, input=None):
"""
Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a string to be sent to the
child process, or None, if no data should be sent to the child.
communicate() returns a tuple (stdoutdata, stderrdata).
"""
stdoutdata, stderrdata = self.process.communicate(input=input)
self._returncode = self.process.returncode
self.set_status(self.S_DONE, "status set to Done")
return stdoutdata, stderrdata
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode
@property
def returncode(self):
"""
The child return code, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
try:
return self._returncode
except AttributeError:
return 0
def reset(self):
"""
Reset the task status. Mainly used if we made a silly mistake in the initial
setup of the queue manager and we want to fix it and rerun the task.
Returns:
0 on success, 1 if reset failed.
"""
# Can only reset tasks that are done.
# One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
# and we want to restart them)
#if self.status != self.S_SUB and self.status < self.S_DONE: return 1
# Remove output files otherwise the EventParser will think the job is still running
self.output_file.remove()
self.log_file.remove()
self.stderr_file.remove()
self.start_lockfile.remove()
self.qerr_file.remove()
self.qout_file.remove()
self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
self.set_qjob(None)
return 0
@property
@return_none_if_raise(AttributeError)
def queue_id(self):
"""Queue identifier returned by the Queue manager. None if not set"""
return self.qjob.qid
@property
@return_none_if_raise(AttributeError)
def qname(self):
"""Queue name identifier returned by the Queue manager. None if not set"""
return self.qjob.qname
@property
def qjob(self):
return self._qjob
def set_qjob(self, qjob):
"""Set info on queue after submission."""
self._qjob = qjob
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.manager.qadapter.QTYPE.lower() != "shell"
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.manager.num_cores
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.manager.mpi_procs
@property
def omp_threads(self):
"""Number of CPUs used for OpenMP."""
return self.manager.omp_threads
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return Memory(self.manager.mem_per_proc, "Mb")
@property
def status(self):
"""Gives the status of the task."""
return self._status
def lock(self, source_node):
"""Lock the task, source is the :class:`Node` that applies the lock."""
if self.status != self.S_INIT:
raise ValueError("Trying to lock a task with status %s" % self.status)
self._status = self.S_LOCKED
self.history.info("Locked by node %s", source_node)
def unlock(self, source_node, check_status=True):
"""
Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
source_node is the :class:`Node` that removed the lock
Call task.check_status if check_status is True.
"""
if self.status != self.S_LOCKED:
raise RuntimeError("Trying to unlock a task with status %s" % self.status)
self._status = self.S_READY
if check_status: self.check_status()
self.history.info("Unlocked by %s", source_node)
#@check_spectator
def set_status(self, status, msg):
"""
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
if len(msg) > 2000:
msg = msg[:2000]
msg += "\n... snip ...\n"
# Locked files must be explicitly unlocked
if self.status == self.S_LOCKED or status == self.S_LOCKED:
err_msg = (
"Locked files must be explicitly unlocked before calling set_status but\n"
"task.status = %s, input status = %s" % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, "_status"):
changed = (status != self._status)
self._status = status
if status == self.S_RUN:
# Set datetimes.start when the task enters S_RUN
if self.datetimes.start is None:
self.datetimes.start = datetime.datetime.now()
# Add new entry to history only if the status has changed.
if changed:
if status == self.S_SUB:
self.datetimes.submission = datetime.datetime.now()
self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
elif status == self.S_OK:
self.history.info("Task completed %s", msg)
elif status == self.S_ABICRITICAL:
self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
else:
self.history.info("Status changed to %s. msg: %s", status, msg)
#######################################################
# The section below contains callbacks that should not
# be executed if we are in spectator_mode
#######################################################
if status == self.S_DONE:
# Execute the callback
self._on_done()
if status == self.S_OK:
# Finalize the task.
if not self.finalized:
self._on_ok()
# here we remove the output files of the task and of its parents.
if self.gc is not None and self.gc.policy == "task":
self.clean_output_files()
self.send_signal(self.S_OK)
return status
def check_status(self):
"""
This function checks the status of the task by inspecting the output and the
error files produced by the application and by the queue manager.
"""
# 1) see if the job is blocked
# 2) see if an error occurred while submitting the job. TODO: these problems can be solved
# 3) see if there is output
# 4) see if abinit reports problems
# 5) see if both err files exist and are empty
# 6) no output and no err files, the job must still be running
# 7) try to find out what caused the problems
# 8) there is a problem but we did not figure out what ...
# 9) the only way of landing here is if there is an output file but no err files...
# 1) A locked task can only be unlocked by calling set_status explicitly.
# an errored task should not end up here, but just to be sure
black_list = (self.S_LOCKED, self.S_ERROR)
#if self.status in black_list: return self.status
# 2) Check the returncode of the process (the process of submitting the job) first.
# this type of problem should also be handled by the scheduler error parser
if self.returncode != 0:
# The job was not submitted properly
return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)
# If we have an abort file produced by Abinit
if self.mpiabort_file.exists:
return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")
# Analyze the stderr file for Fortran runtime errors.
# getsize is 0 if the file is empty or it does not exist.
err_msg = None
if self.stderr_file.getsize() != 0:
#if self.stderr_file.exists:
err_msg = self.stderr_file.read()
# Analyze the stderr file of the resource manager runtime errors.
# TODO: Why are we looking for errors in queue.qerr?
qerr_info = None
if self.qerr_file.getsize() != 0:
#if self.qerr_file.exists:
qerr_info = self.qerr_file.read()
# Analyze the stdout file of the resource manager (needed for PBS !)
qout_info = None
if self.qout_file.getsize():
#if self.qout_file.exists:
qout_info = self.qout_file.read()
# Start to check ABINIT status if the output file has been created.
#if self.output_file.getsize() != 0:
if self.output_file.exists:
try:
report = self.get_event_report()
except Exception as exc:
msg = "%s exception while parsing event_report:\n%s" % (self, exc)
return self.set_status(self.S_ABICRITICAL, msg=msg)
if report is None:
return self.set_status(self.S_ERROR, msg="got None report!")
if report.run_completed:
# Here we set the correct timing data reported by Abinit
self.datetimes.start = report.start_datetime
self.datetimes.end = report.end_datetime
# Check if the calculation converged.
not_ok = report.filter_types(self.CRITICAL_EVENTS)
if not_ok:
return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
else:
return self.set_status(self.S_OK, msg="status set to ok based on abiout")
# Calculation still running or errors?
if report.errors:
# Abinit reported problems
logger.debug('Found errors in report')
for error in report.errors:
logger.debug(str(error))
try:
self.abi_errors.append(error)
except AttributeError:
self.abi_errors = [error]
# The job is unfixable due to ABINIT errors
logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
msg = "\n".join(map(repr, report.errors))
return self.set_status(self.S_ABICRITICAL, msg=msg)
# 5)
if self.stderr_file.exists and not err_msg:
if self.qerr_file.exists and not qerr_info:
# there is output and no errors
# The job still seems to be running
return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')
# 6)
if not self.output_file.exists:
logger.debug("output_file does not exists")
if not self.stderr_file.exists and not self.qerr_file.exists:
# No output at all. The job is still in the queue.
return self.status
# 7) Analyze the files of the resource manager and abinit and execution err (mvs)
if qerr_info or qout_info:
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
out_file=self.qout_file.path, run_err_file=self.stderr_file.path)
if scheduler_parser is None:
return self.set_status(self.S_QCRITICAL,
msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)
scheduler_parser.parse()
if scheduler_parser.errors:
# Store the queue errors in the task
self.queue_errors = scheduler_parser.errors
# The job is killed or crashed and we know what happened
msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
return self.set_status(self.S_QCRITICAL, msg=msg)
elif lennone(qerr_info) > 0:
# if only qout_info, we are not necessarily in QCRITICAL state,
# since there will always be info in the qout file
self.history.info('found unknown messages in the queue error: %s' % str(qerr_info))
#try:
# rt = self.datetimes.get_runtime().seconds
#except:
# rt = -1.0
#tl = self.manager.qadapter.timelimit
#if rt > tl:
# msg += 'set to error : runtime (%s) exceded walltime (%s)' % (rt, tl)
# print(msg)
# return self.set_status(self.S_ERROR, msg=msg)
# The job may be killed or crashed but we don't know what happened
# It may also be that an innocent message was written to qerr, so we wait for a while
# it is set to QCritical, we will attempt to fix it by running on more resources
# 8) analyzing the err files and abinit output did not identify a problem
# but if the files are not empty we do have a problem with no obvious way of solving it:
if lennone(err_msg) > 0:
msg = 'found error message:\n %s' % str(err_msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed but we don't know what happened
# it is set to QCritical, we will attempt to fix it by running on more resources
# 9) if we still haven't returned there is no indication of any error and the job can only still be running
# but we should actually never land here, or we have delays in the file system ....
# print('the job still seems to be running maybe it is hanging without producing output... ')
# Check time of last modification.
if self.output_file.exists and \
(time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
return self.set_status(self.S_ERROR, msg=msg)
# Handle weird case in which either run.abo, or run.log have not been produced
#if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits):
# msg = "Task have been submitted but cannot find the log file or the output file"
# return self.set_status(self.S_ERROR, msg)
return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
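# Illustrative sketch (added note, not part of the original source): a minimal polling
# loop built on top of check_status / can_run / start. The real scheduling logic lives
# elsewhere in the package; `flow` and its iflat_tasks() iterator are assumed here.
#
#     import time
#     while True:
#         for task in flow.iflat_tasks():
#             task.check_status()
#             if task.can_run:
#                 task.start()
#         if all(t.status == t.S_OK for t in flow.iflat_tasks()):
#             break
#         time.sleep(5)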
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
Should be overwritten by specific tasks.
"""
return False
def speed_up(self):
"""
Method that can be called by the flow to decrease the time needed for a specific task.
Returns True in case of success, False in case of Failure
Should be overwritten by specific tasks.
"""
return False
def out_to_in(self, out_file):
"""
Move an output file to the input data directory of the `Task`
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
os.rename(out_file, dest)
return dest
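# Illustrative sketch (added note, not part of the original source): out_to_in is typically
# invoked when restarting, e.g. to make a previous density readable as an input file.
# The file name below is hypothetical.
#
#     out_den = task.outdir.path_in("out_DEN")
#     in_den = task.out_to_in(out_den)     # new path inside self.indir, e.g. .../in_DEN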
def inlink_file(self, filepath):
"""
Create a symbolic link to the specified file in the
directory containing the input files of the task.
"""
if not os.path.exists(filepath):
logger.debug("Creating symbolic link to not existent file %s" % filepath)
# Extract the Abinit extension and add the prefix for input files.
root, abiext = abi_splitext(filepath)
infile = "in_" + abiext
infile = self.indir.path_in(infile)
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
self.history.info("Linking path %s --> %s" % (filepath, infile))
if not os.path.exists(infile):
os.symlink(filepath, infile)
else:
if os.path.realpath(infile) != filepath:
raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
def make_links(self):
"""
Create symbolic links to the output files produced by the other tasks.
.. warning::
This method should be called only when the calculation is READY because
it uses a heuristic approach to find the file to link.
"""
for dep in self.deps:
filepaths, exts = dep.get_filepaths_and_exts()
for path, ext in zip(filepaths, exts):
logger.info("Need path %s with ext %s" % (path, ext))
dest = self.ipath_from_ext(ext)
if not os.path.exists(path):
# Try netcdf file.
# TODO: this case should be treated in a cleaner way.
path += ".nc"
if os.path.exists(path): dest += ".nc"
if not os.path.exists(path):
raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))
if path.endswith(".nc") and not dest.endswith(".nc"): # NC --> NC file
dest += ".nc"
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
logger.debug("Linking path %s --> %s" % (path, dest))
if not os.path.exists(dest):
os.symlink(path, dest)
else:
# check links but only if we haven't performed the restart.
# in this case, indeed we may have replaced the file pointer with the
# previous output file of the present task.
if os.path.realpath(dest) != path and self.num_restarts == 0:
raise self.Error("dest %s does not point to path %s" % (dest, path))
@abc.abstractmethod
def setup(self):
"""Public method called before submitting the task."""
def _setup(self):
"""
This method calls self.setup after having performed additional operations
such as the creation of the symbolic links needed to connect different tasks.
"""
self.make_links()
self.setup()
def get_event_report(self, source="log"):
"""
Analyzes the main logfile of the calculation for possible Errors or Warnings.
If the ABINIT abort file is found, the error found in this file are added to
the output report.
Args:
source: "output" for the main output file,"log" for the log file.
Returns:
:class:`EventReport` instance or None if the source file does not exist.
"""
# By default, we inspect the main log file.
ofile = {
"output": self.output_file,
"log": self.log_file}[source]
parser = events.EventsParser()
if not ofile.exists:
if not self.mpiabort_file.exists:
return None
else:
# ABINIT abort file without log!
abort_report = parser.parse(self.mpiabort_file.path)
return abort_report
try:
report = parser.parse(ofile.path)
#self._prev_reports[source] = report
# Add events found in the ABI_MPIABORTFILE.
if self.mpiabort_file.exists:
logger.critical("Found ABI_MPIABORTFILE!!!!!")
abort_report = parser.parse(self.mpiabort_file.path)
if len(abort_report) != 1:
logger.critical("Found more than one event in ABI_MPIABORTFILE")
# Weird case: empty abort file, let's skip the part
# below and hope that the log file contains the error message.
#if not len(abort_report): return report
# Add it to the initial report only if it differs
# from the last one found in the main log file.
last_abort_event = abort_report[-1]
if report and last_abort_event != report[-1]:
report.append(last_abort_event)
else:
report.append(last_abort_event)
return report
#except parser.Error as exc:
except Exception as exc:
# Return a report with an error entry with info on the exception.
msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
self.set_status(self.S_ABICRITICAL, msg=msg)
return parser.report_exception(ofile.path, exc)
def get_results(self, **kwargs):
"""
Returns :class:`NodeResults` instance.
Subclasses should extend this method (if needed) by adding
specialized code that performs some kind of post-processing.
"""
# Check whether the process completed.
if self.returncode is None:
raise self.Error("return code is None, you should call wait, communitate or poll")
if self.status is None or self.status < self.S_DONE:
raise self.Error("Task is not completed")
return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
By default, dest is located in the parent directory of self.workdir.
Use is_abspath=True to specify an absolute path.
"""
if not is_abspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def in_files(self):
"""Return all the input data files used."""
return self.indir.list_filepaths()
def out_files(self):
"""Return all the output data files produced."""
return self.outdir.list_filepaths()
def tmp_files(self):
"""Return all the input data files produced."""
return self.tmpdir.list_filepaths()
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the top-level working directory."""
return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
#@check_spectator
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by |.
Files matching one of the regular expressions will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
def clean_output_files(self, follow_parents=True):
"""
This method is called when the task reaches S_OK. It removes all the output files
produced by the task that are not needed by its children as well as the output files
produced by its parents if no other node needs them.
Args:
follow_parents: If true, the output files of the parents nodes will be removed if possible.
Return:
list with the absolute paths of the files that have been removed.
"""
paths = []
if self.status != self.S_OK:
logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
# Remove all files in tmpdir.
self.tmpdir.clean()
# Find the file extensions that should be preserved since these files are still
# needed by the children who haven't reached S_OK
except_exts = set()
for child in self.get_children():
if child.status == self.S_OK: continue
# Find the position of self in child.deps and add the extensions.
i = [dep.node for dep in child.deps].index(self)
except_exts.update(child.deps[i].exts)
# Remove the files in the outdir of the task but keep except_exts.
exts = self.gc.exts.difference(except_exts)
#print("Will remove its extensions: ", exts)
paths += self.outdir.remove_exts(exts)
if not follow_parents: return paths
# Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
for parent in self.get_parents():
# Here we build a dictionary file extension --> list of child nodes requiring this file from parent
# e.g {"WFK": [node1, node2]}
ext2nodes = collections.defaultdict(list)
for child in parent.get_children():
if child.status == child.S_OK: continue
i = [d.node for d in child.deps].index(parent)
for ext in child.deps[i].exts:
ext2nodes[ext].append(child)
# Remove extension only if no node depends on it!
except_exts = [k for k, lst in ext2nodes.items() if lst]
exts = self.gc.exts.difference(except_exts)
#print("%s removes extensions %s from parent node %s" % (self, exts, parent))
paths += parent.outdir.remove_exts(exts)
self.history.info("Removed files: %s" % paths)
return paths
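# Worked example (added note, not part of the original source) of the extension bookkeeping
# above: with a hypothetical garbage collector configured with gc.exts == {"WFK", "DEN"}
# and a pending child that still needs the DEN file of this task,
#
#     except_exts = {"DEN"}
#     exts = gc.exts.difference(except_exts)      # == {"WFK"}
#
# so only the WFK files are removed from the outdir while the DEN files are preserved.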
def setup(self):
"""Base class does not provide any hook."""
#@check_spectator
def start(self, **kwargs):
"""
Starts the calculation by performing the following steps:
- build dirs and files
- call the _setup method
- execute the job file by executing/submitting the job script.
Main entry point for the `Launcher`.
============== ==============================================================
kwargs Meaning
============== ==============================================================
autoparal False to skip the autoparal step (default True)
exec_args List of arguments passed to executable.
============== ==============================================================
Returns:
1 if task was started, 0 otherwise.
"""
if self.status >= self.S_SUB:
raise self.Error("Task status: %s" % str(self.status))
if self.start_lockfile.exists:
self.history.warning("Found lock file: %s" % self.start_lockfile.path)
return 0
self.start_lockfile.write("Started on %s" % time.asctime())
self.build()
self._setup()
# Add the variables needed to connect the node.
for d in self.deps:
cvars = d.connecting_vars()
self.history.info("Adding connecting vars %s" % cvars)
self.set_vars(cvars)
# Get (python) data from other nodes
d.apply_getters(self)
# Automatic parallelization
if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
try:
self.autoparal_run()
except QueueAdapterError as exc:
# If autoparal cannot find a qadapter to run the calculation raises an Exception
self.history.critical(exc)
msg = "Error while trying to run autoparal in task:%s\n%s" % (repr(self), straceback())
cprint(msg, "yellow")
self.set_status(self.S_QCRITICAL, msg=msg)
return 0
except Exception as exc:
# Sometimes autoparal_run fails because Abinit aborts
# at the level of the parser e.g. cannot find the spacegroup
# due to some numerical noise in the structure.
# In this case we call fix_abicritical and then we try to run autoparal again.
self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
msg = "autoparal_fake_run raised:\n%s" % straceback()
logger.critical(msg)
fixed = self.fix_abicritical()
if not fixed:
self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
return 0
try:
self.autoparal_run()
self.history.info("Second call to autoparal succeeded!")
#cprint("Second call to autoparal succeeded!", "green")
except Exception as exc:
self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
msg = "Tried autoparal again but got:\n%s" % straceback()
cprint(msg, "red")
self.set_status(self.S_ABICRITICAL, msg=msg)
return 0
# Start the calculation in a subprocess and return.
self._process = self.manager.launch(self, **kwargs)
return 1
def start_and_wait(self, *args, **kwargs):
"""
Helper method to start the task and wait for completion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
class DecreaseDemandsError(Exception):
"""
Exception raised by a task if a request to decrease some demand (load or memory) could not be fulfilled.
"""
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
@classmethod
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
input: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager)
@classmethod
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
mpi_procs: Number of MPI processes to use.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
task.set_name('temp_shell_task')
return task
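# Illustrative sketch (added note, not part of the original source): a temporary shell task
# is handy for quick runs whose only purpose is to parse some Abinit output. `gs_input`
# is a hypothetical AbinitInput instance.
#
#     task = AbinitTask.temp_shell_task(gs_input, mpi_procs=1)
#     task.start_and_wait(autoparal=False)
#     report = task.get_event_report()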
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies on the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
@property
def pseudos(self):
"""List of pseudos used in the calculation."""
return self.input.pseudos
@property
def isnc(self):
"""True if norm-conserving calculation."""
return self.input.isnc
@property
def ispaw(self):
"""True if PAW calculation"""
return self.input.ispaw
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
def set_pconfs(self, pconfs):
"""Set the list of autoparal configurations."""
self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
def uses_paral_kgb(self, value=1):
"""True if the task is a GS Task and uses paral_kgb with the given value."""
paral_kgb = self.get_inpvar("paral_kgb", 0)
# paral_kgb is used only in the GS part.
return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the task
This method can change the ABINIT input variables and/or the
submission parameters e.g. the number of CPUs for MPI and OpenMp.
Set:
self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
autoparal and optimal is the optimal configuration selected.
Returns 0 if success
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
optconf = self.find_optconf(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparallel run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
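# Illustrative sketch (added note, not part of the original source): typical calls to
# select_files, e.g. to collect the input, log and stderr files of a task:
#
#     paths = task.select_files("ile")     # input_file, log_file, stderr_file
#     all_paths = task.select_files("all")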
def restart(self):
"""
General restart used when scheduler problems have been taken care of.
"""
return self._restart()
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
Output files produced in workdir are moved to _reset, otherwise check_status continues
to see the task as crashed even if the job did not run.
"""
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
#@check_spectator
def fix_abicritical(self):
"""
Method to fix crashes/errors caused by Abinit.
Returns:
1 if task has been fixed else 0.
"""
event_handlers = self.event_handlers
if not event_handlers:
self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
return 0
count, done = 0, len(event_handlers) * [0]
report = self.get_event_report()
if report is None:
self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
return 0
# Note we loop over all possible events (slow, I know)
# because we can have handlers for Error, Bug or Warning
# (ideally only for CriticalWarnings but this is not done yet)
for event in report:
for i, handler in enumerate(self.event_handlers):
if handler.can_handle(event) and not done[i]:
logger.info("handler %s will try to fix event %s" % (handler, event))
try:
d = handler.handle_task_event(self, event)
if d:
done[i] += 1
count += 1
except Exception as exc:
logger.critical(str(exc))
if count:
self.reset_from_scratch()
return 1
self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
return 0
#@check_spectator
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
self.history.info('fixing queue critical')
ret = "task.fix_queue_critical: "
if not self.queue_errors:
# TODO
# paral_kgb = 1 leads to nasty sigsegv that are seen as Qcritical errors!
# Try to fallback to the conjugate gradient.
#if self.uses_paral_kgb(1):
# logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
# self.set_vars(paral_kgb=0)
# self.reset_from_scratch()
# return
# queue error but no errors detected: try to solve by increasing ncpus if the task scales
# if resources are already at the maximum, the task is definitively marked as errored
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
ret += "increased resources"
return ret
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
print("type_list: %s" % list(type(qe) for qe in self.queue_errors))
for error in self.queue_errors:
self.history.info('fixing: %s' % str(error))
ret += str(error)
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# ask the qadapter to provide more resources, i.e. more cpus and hence more total memory;
# if the code scales, this should fix the memory problem
# increase both max and min ncpu of the autoparal and rerun autoparal
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
self.history.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased neither could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
print('trying to increase time')
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
self.history.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased neither could\n'
'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None
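# Illustrative sketch (added note, not part of the original source): parse_timing only
# returns a parser if timopt was set appropriately in the input, so callers should guard
# against None. The exact parser API is defined in .abitimer and is not reproduced here.
#
#     timer = task.parse_timing()
#     if timer is not None:
#         print(timer)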
class ProduceHist(object):
"""
Mixin class for an :class:`AbinitTask` producing a HIST file.
Provide the method `open_hist` that reads and returns a HIST file.
"""
@property
def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path
except AttributeError:
path = self.outdir.has_abiext("HIST")
if path: self._hist_path = path
return path
def open_hist(self):
"""
Open the HIST file located in self.outdir.
Returns :class:`HistFile` object, None if file could not be found or file is not readable.
"""
if not self.hist_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
return None
# Open the HIST file
from abipy.dynamics.hist import HistFile
try:
return HistFile(self.hist_path)
except Exception as exc:
logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
return None
class GsTask(AbinitTask):
"""
Base class for ground-state tasks. A ground state task produces a GSR file
Provides the method `open_gsr` that reads and returns a GSR file.
"""
@property
def gsr_path(self):
"""Absolute path of the GSR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._gsr_path
except AttributeError:
path = self.outdir.has_abiext("GSR")
if path: self._gsr_path = path
return path
def open_gsr(self):
"""
Open the GSR file located in self.outdir.
Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
"""
gsr_path = self.gsr_path
if not gsr_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
# Open the GSR file.
from abipy.electrons.gsr import GsrFile
try:
return GsrFile(gsr_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (gsr_path, str(exc)))
return None
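# Illustrative sketch (added note, not part of the original source): open_gsr returns None
# on failure, so callers should guard against it; the GSR object supports the context
# manager protocol (as used in get_results below). The attribute shown is hypothetical.
#
#     gsr = task.open_gsr()
#     if gsr is not None:
#         with gsr:
#             print(gsr.energy)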
class ScfTask(GsTask):
"""
Self-consistent ground-state calculations.
Provide support for in-place restart via (WFK|DEN) files
"""
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((255, 0, 0)) / 255
def restart(self):
"""SCF calculations can be restarted if we have either the WFK file or the DEN file."""
# Prefer WFK over DEN files since we can reuse the wavefunctions.
for ext in ("WFK", "DEN"):
restart_file = self.outdir.has_abiext(ext)
irdvars = irdvars_for_ext(ext)
if restart_file: break
else:
raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def inspect(self, **kwargs):
"""
Plot the SCF cycle results with matplotlib.
Returns
`matplotlib` figure, None if some error occurred.
"""
try:
scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
except IOError:
return None
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
return None
def get_results(self, **kwargs):
results = super(ScfTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class CollinearThenNonCollinearScfTask(ScfTask):
"""
A specialized ScfTask that performs an initial SCF run with nsppol = 2.
The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
initialized from the previous WFK file.
"""
def __init__(self, input, workdir=None, manager=None, deps=None):
super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
# Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
self._input = self.input.deepcopy()
self.input.set_spin_mode("polarized")
self.input.set_vars(prtwf=1)
self.collinear_done = False
def _on_ok(self):
results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
if not self.collinear_done:
self.input.set_spin_mode("spinor")
self.collinear_done = True
self.finalized = False
self.restart()
return results
class NscfTask(GsTask):
"""
Non-Self-consistent GS calculation. Provide in-place restart via WFK files
"""
CRITICAL_EVENTS = [
events.NscfConvergenceWarning,
]
color_rgb = np.array((255, 122, 122)) / 255
def restart(self):
"""NSCF calculations can be restarted only if we have the WFK file."""
ext = "WFK"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def get_results(self, **kwargs):
results = super(NscfTask, self).get_results(**kwargs)
# Read the GSR file.
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class RelaxTask(GsTask, ProduceHist):
"""
Task for structural optimizations.
"""
# TODO possible ScfConvergenceWarning?
CRITICAL_EVENTS = [
events.RelaxConvergenceWarning,
]
color_rgb = np.array((255, 61, 255)) / 255
def get_final_structure(self):
"""Read the final structure from the GSR file."""
try:
with self.open_gsr() as gsr:
return gsr.structure
except AttributeError:
raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")
def restart(self):
"""
Restart the structural relaxation.
Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file
from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
Prefer WFK over other files since we can reuse the wavefunctions.
.. note::
The problem in the present approach is that some parameters in the input
are computed from the initial structure and may not be consistent with
the modification of the structure done during the structure relaxation.
"""
restart_file = None
# Try to restart from the WFK file if possible.
# FIXME: This part has been disabled because WFK IO is a mess if paral_kgb == 1
# This is also the reason why I wrote my own MPI-IO code for the GW part!
wfk_file = self.outdir.has_abiext("WFK")
if False and wfk_file:
irdvars = irdvars_for_ext("WFK")
restart_file = self.out_to_in(wfk_file)
# Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
# This happens when the previous run completed and task.on_done has been performed.
# ********************************************************************************
# Note that it's possible to have an undetected error if we have multiple restarts
# and the last relax died badly. In this case indeed out_DEN is the file produced
# by the last run that has executed on_done.
# ********************************************************************************
if restart_file is None:
for ext in ("", ".nc"):
out_den = self.outdir.path_in("out_DEN" + ext)
if os.path.exists(out_den):
irdvars = irdvars_for_ext("DEN")
restart_file = self.out_to_in(out_den)
break
if restart_file is None:
# Try to restart from the last TIM?_DEN file.
# This should happen if the previous run didn't complete in clean way.
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is not None:
if last_timden.path.endswith(".nc"):
ofile = self.outdir.path_in("out_DEN.nc")
else:
ofile = self.outdir.path_in("out_DEN")
os.rename(last_timden.path, ofile)
restart_file = self.out_to_in(ofile)
irdvars = irdvars_for_ext("DEN")
if restart_file is None:
# Don't raise RestartError as we can still change the structure.
self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
else:
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
self.history.info("Will restart from %s", restart_file)
# FIXME Here we should read the HIST file but restartxf is broken!
#self.set_vars({"restartxf": -1})
# Read the relaxed structure from the GSR file and change the input.
self._change_structure(self.get_final_structure())
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the evolution of the structural relaxation with matplotlib.
Args:
what: Either "hist" or "scf". The first option (default) extracts data
from the HIST file and plot the evolution of the structural
parameters, forces, pressures and energies.
The second option, extracts data from the main output file and
plot the evolution of the SCF cycles (etotal, residuals, etc).
Returns:
`matplotlib` figure, None if some error occurred.
"""
what = kwargs.pop("what", "hist")
if what == "hist":
# Read the hist file to get access to the structure.
with self.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
elif what == "scf":
# Get info on the different SCF cycles
relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
if "title" not in kwargs: kwargs["title"] = str(self)
return relaxation.plot(**kwargs) if relaxation is not None else None
else:
raise ValueError("Wrong value for what %s" % what)
def get_results(self, **kwargs):
results = super(RelaxTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
def reduce_dilatmx(self, target=1.01):
actual_dilatmx = self.get_inpvar('dilatmx', 1.)
new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
self.set_vars(dilatmx=new_dilatmx)
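# Worked example (added note, not part of the original source): with the default target=1.01
# and a current dilatmx of 1.05 the update above gives
#
#     new_dilatmx = 1.05 - min(1.05 - 1.01, 1.05 * 0.05)
#                 = 1.05 - min(0.04, 0.0525) = 1.01
#
# i.e. dilatmx moves towards the target, with the step capped at 5% of its current value.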
def fix_ofiles(self):
"""
Note that ABINIT produces a TIM?_DEN file (out_TIM1_DEN, out_TIM2_DEN, ...) for each relaxation step.
Here we list all TIM*_DEN files, select the last one and rename it to out_DEN.
This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
without having to know the number of iterations needed to converge the run in node!
"""
super(RelaxTask, self).fix_ofiles()
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is None:
logger.warning("Cannot find TIM?_DEN files")
return
# Rename last TIMDEN with out_DEN.
ofile = self.outdir.path_in("out_DEN")
if last_timden.path.endswith(".nc"): ofile += ".nc"
self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
os.rename(last_timden.path, ofile)
class DfptTask(AbinitTask):
"""
Base class for DFPT tasks (Phonons, ...)
Mainly used to implement methods that are common to DFPT calculations with Abinit.
Provide the method `open_ddb` that reads and returns a DDB file.
.. warning::
This class should not be instantiated directly.
"""
@property
def ddb_path(self):
"""Absolute path of the DDB file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._ddb_path
except AttributeError:
path = self.outdir.has_abiext("DDB")
if path: self._ddb_path = path
return path
def open_ddb(self):
"""
Open the DDB file located in self.outdir.
Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.ddb_path
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
return None
# Open the DDB file.
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
class DdeTask(DfptTask):
"""Task for DDE calculations."""
def make_links(self):
"""Replace the default behaviour of make_links"""
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
# Get (fortran) idir and construct the name of the 1WF expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_WFK")):
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
def get_results(self, **kwargs):
results = super(DdeTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DteTask(DfptTask):
"""Task for DTE calculations."""
# @check_spectator
def start(self, **kwargs):
kwargs['autoparal'] = False
return super(DteTask, self).start(**kwargs)
def make_links(self):
"""Replace the default behaviour of make_links"""
for dep in self.deps:
for d in dep.exts:
if d == "DDK":
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
                    # Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif d == "WFK":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_WFK")):
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
elif d == "DEN":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("DEN")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_DEN")):
os.symlink(out_wfk, self.indir.path_in("in_DEN"))
elif d == "1WF":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("1WF")
if not out_wfk:
raise RuntimeError("%s didn't produce the 1WF file" % gs_task)
dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
if not os.path.exists(dest):
os.symlink(out_wfk, dest)
elif d == "1DEN":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("DEN")
if not out_wfk:
raise RuntimeError("%s didn't produce the 1WF file" % gs_task)
dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
if not os.path.exists(dest):
os.symlink(out_wfk, dest)
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
def get_results(self, **kwargs):
        results = super(DteTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DdkTask(DfptTask):
"""Task for DDK calculations."""
color_rgb = np.array((61, 158, 255)) / 255
#@check_spectator
def _on_ok(self):
super(DdkTask, self)._on_ok()
# Copy instead of removing, otherwise optic tests fail
# Fixing this problem requires a rationalization of file extensions.
#if self.outdir.rename_abiext('1WF', 'DDK') > 0:
#if self.outdir.copy_abiext('1WF', 'DDK') > 0:
self.outdir.symlink_abiext('1WF', 'DDK')
def get_results(self, **kwargs):
results = super(DdkTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDK=(self.outdir.has_abiext("DDK"), "t"))
class BecTask(DfptTask):
"""
Task for the calculation of Born effective charges.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
"""
color_rgb = np.array((122, 122, 255)) / 255
def make_links(self):
"""Replace the default behaviour of make_links"""
#print("In BEC make_links")
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
                # Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
"""
DFPT calculations for a single atomic perturbation.
Provide support for in-place restart via (1WF|1DEN) files
"""
# TODO:
# for the time being we don't discern between GS and PhononCalculations.
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((0, 0, 255)) / 255
def restart(self):
"""
        Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file
        from which we can read the first-order wavefunctions or the first-order density.
        Prefer 1WF over 1DEN since we can reuse the wavefunctions.
"""
        # Abinit adds the idir-ipert index at the end of the file and this breaks the extension
        # e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files return the list of files found.
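        # For instance (illustrative, using the Abinit convention pertcase = idir + 3*(ipert-1)):
        # the perturbation with ipert=2 and idir=1 is written to out_1WF4.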
restart_file, irdvars = None, None
# Highest priority to the 1WF file because restart is more efficient.
wf_files = self.outdir.find_1wf_files()
if wf_files is not None:
restart_file = wf_files[0].path
irdvars = irdvars_for_ext("1WF")
if len(wf_files) != 1:
restart_file = None
logger.critical("Found more than one 1WF file. Restart is ambiguous!")
if restart_file is None:
den_files = self.outdir.find_1den_files()
if den_files is not None:
restart_file = den_files[0].path
irdvars = {"ird1den": 1}
if len(den_files) != 1:
restart_file = None
logger.critical("Found more than one 1DEN file. Restart is ambiguous!")
if restart_file is None:
# Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)
# Move file.
self.history.info("Will restart from %s", restart_file)
restart_file = self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the Phonon SCF cycle results with matplotlib.
Returns:
`matplotlib` figure, None if some error occurred.
"""
scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
def get_results(self, **kwargs):
results = super(PhononTask, self).get_results(**kwargs)
return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))
def make_links(self):
super(PhononTask, self).make_links()
# fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
#if self.indir.has_abiext('DDK'):
# self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
"""
Class for electron-phonon calculations.
"""
color_rgb = np.array((255, 128, 0)) / 255
class ManyBodyTask(AbinitTask):
"""
Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
Mainly used to implement methods that are common to MBPT calculations with Abinit.
.. warning::
This class should not be instantiated directly.
"""
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
        Returns True in case of success, False in case of failure.
"""
# The first digit governs the storage of W(q), the second digit the storage of u(r)
        # Try to avoid the storage of u(r) first since reading W(q) from file will lead to a dramatic slowdown.
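        # Worked example (assuming the default gwmem=11): the first call sets gwmem="10" so that
        # u(r) is re-read from file while W(q) stays in memory; a second call sets gwmem="00";
        # once both digits are zero there is nothing left to release and False is returned.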
prev_gwmem = int(self.get_inpvar("gwmem", default=11))
first_dig, second_dig = prev_gwmem // 10, prev_gwmem % 10
if second_dig == 1:
self.set_vars(gwmem="%.2d" % (10 * first_dig))
return True
if first_dig == 1:
self.set_vars(gwmem="%.2d" % 00)
return True
# gwmem 00 d'oh!
return False
class ScrTask(ManyBodyTask):
"""Tasks for SCREENING calculations """
color_rgb = np.array((255, 128, 0)) / 255
#def inspect(self, **kwargs):
# """Plot graph showing the number of q-points computed and the wall-time used"""
@property
def scr_path(self):
"""Absolute path of the SCR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._scr_path
except AttributeError:
path = self.outdir.has_abiext("SCR.nc")
if path: self._scr_path = path
return path
def open_scr(self):
"""
        Open the SCR file located in self.outdir.
Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
"""
scr_path = self.scr_path
if not scr_path:
logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
return None
        # Open the SCR file.
from abipy.electrons.scr import ScrFile
try:
return ScrFile(scr_path)
except Exception as exc:
logger.critical("Exception while reading SCR file at %s:\n%s" % (scr_path, str(exc)))
return None
class SigmaTask(ManyBodyTask):
"""
Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
"""
CRITICAL_EVENTS = [
events.QPSConvergenceWarning,
]
color_rgb = np.array((0, 255, 0)) / 255
def restart(self):
        # Sigma calculations can be restarted only if we have the QPS file
# from which we can read the results of the previous step.
ext = "QPS"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """Plot graph showing the number of k-points computed and the wall-time used"""
@property
def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path
def open_sigres(self):
"""
        Open the SIGRES file located in self.outdir.
Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
"""
sigres_path = self.sigres_path
if not sigres_path:
logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
return None
# Open the SIGRES file and add its data to results.out
from abipy.electrons.gw import SigresFile
try:
return SigresFile(sigres_path)
except Exception as exc:
logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
return None
def get_scissors_builder(self):
"""
Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.
Raise:
`RuntimeError` if SIGRES file is not found.
"""
from abipy.electrons.scissors import ScissorsBuilder
if self.sigres_path:
return ScissorsBuilder.from_file(self.sigres_path)
else:
raise RuntimeError("Cannot find SIGRES file!")
def get_results(self, **kwargs):
results = super(SigmaTask, self).get_results(**kwargs)
# Open the SIGRES file and add its data to results.out
with self.open_sigres() as sigres:
#results["out"].update(sigres.as_dict())
results.register_gridfs_files(SIGRES=sigres.filepath)
return results
class BseTask(ManyBodyTask):
"""
Task for Bethe-Salpeter calculations.
.. note::
    The BSE code provides both iterative and direct schemes for the computation of the dielectric function.
The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
"""
CRITICAL_EVENTS = [
events.HaydockConvergenceWarning,
#events.BseIterativeDiagoConvergenceWarning,
]
color_rgb = np.array((128, 0, 255)) / 255
def restart(self):
"""
BSE calculations with Haydock can be restarted only if we have the
excitonic Hamiltonian and the HAYDR_SAVE file.
"""
# TODO: This version seems to work but the main output file is truncated
# TODO: Handle restart if CG method is used
# TODO: restart should receive a list of critical events
# the log file is complete though.
irdvars = {}
# Move the BSE blocks to indata.
# This is done only once at the end of the first run.
# Successive restarts will use the BSR|BSC files in the indir directory
# to initialize the excitonic Hamiltonian
count = 0
for ext in ("BSR", "BSC"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
# outdir does not contain the BSR|BSC file.
# This means that num_restart > 1 and the files should be in task.indir
count = 0
for ext in ("BSR", "BSC"):
ifile = self.indir.has_abiext(ext)
if ifile:
count += 1
if not count:
raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))
# Rename HAYDR_SAVE files
count = 0
for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
#self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """
# Plot the Haydock iterations with matplotlib.
#
# Returns
# `matplotlib` figure, None if some error occurred.
# """
# haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
# if haydock_cycle is not None:
# if "title" not in kwargs: kwargs["title"] = str(self)
# return haydock_cycle.plot(**kwargs)
@property
def mdf_path(self):
"""Absolute path of the MDF file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._mdf_path
except AttributeError:
path = self.outdir.has_abiext("MDF.nc")
if path: self._mdf_path = path
return path
def open_mdf(self):
"""
        Open the MDF file located in self.outdir.
Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
"""
mdf_path = self.mdf_path
if not mdf_path:
logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
return None
        # Open the MDF file.
from abipy.electrons.bse import MdfFile
try:
return MdfFile(mdf_path)
except Exception as exc:
logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(BseTask, self).get_results(**kwargs)
with self.open_mdf() as mdf:
#results["out"].update(mdf.as_dict())
#epsilon_infinity optical_gap
results.register_gridfs_files(MDF=mdf.filepath)
return results
class OpticTask(Task):
"""
    Task for the computation of optical spectra with optic, i.e.
    RPA without local-field effects and the velocity operator computed from DDK files.
"""
color_rgb = np.array((255, 204, 102)) / 255
def __init__(self, optic_input, nscf_node, ddk_nodes, use_ddknc=False, workdir=None, manager=None):
"""
        Create an instance of :class:`OpticTask` from a string containing the input.
Args:
optic_input: :class:`OpticInput` object with optic variables.
nscf_node: The task that will produce the WFK file with the KS energies or path to the WFK file.
ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDK filepaths.
Order (x, y, z)
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
# Convert paths to FileNodes
self.nscf_node = Node.as_node(nscf_node)
self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
assert len(ddk_nodes) == 3
#print(self.nscf_node, self.ddk_nodes)
# Use DDK extension instead of 1WF
if use_ddknc:
deps = {n: "DDK.nc" for n in self.ddk_nodes}
else:
deps = {n: "1WF" for n in self.ddk_nodes}
deps.update({self.nscf_node: "WFK"})
super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory of the task."""
super(OpticTask, self).set_workdir(workdir, chroot=chroot)
# Small hack: the log file of optics is actually the main output file.
self.output_file = self.log_file
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
        Optic does not use `get` or `ird` variables, hence we should never try
        to change the input when we connect this task.
"""
kwargs.update(dict(*args))
self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
@property
def executable(self):
"""Path to the executable required for running the :class:`OpticTask`."""
try:
return self._executable
except AttributeError:
return "optic"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
#optic.in ! Name of input file
#optic.out ! Unused
#optic ! Root name for all files that will be produced
app(self.input_file.path) # Path to the input file
app(os.path.join(self.workdir, "unused")) # Path to the output file
app(os.path.join(self.workdir, self.prefix.odata)) # Prefix for output data
return "\n".join(lines)
@property
def wfk_filepath(self):
"""Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
return self.nscf_node.outdir.has_abiext("WFK")
@property
def ddk_filepaths(self):
"""Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
# This to support new version of optic that used DDK.nc
paths = [ddk_task.outdir.has_abiext("DDK.nc") for ddk_task in self.ddk_nodes]
if all(p for p in paths):
return paths
# This is deprecated and can be removed when new version of Abinit is released.
return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
def make_input(self):
"""Construct and write the input file of the calculation."""
# Set the file paths.
        all_files = {"ddkfile_" + str(n + 1): ddk for n, ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile": self.wfk_filepath})
files_nml = {"FILES": all_files}
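        # For illustration only (the exact layout depends on nmltostring), the block written here
        # is a Fortran namelist of the form:
        #   &FILES
        #    ddkfile_1 = "<path to x DDK>", ddkfile_2 = "<path to y DDK>", ddkfile_3 = "<path to z DDK>",
        #    wfkfile = "<path to WFK>"
        #   /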
        files = nmltostring(files_nml)
# Get the input specified by the user
user_file = nmltostring(self.input.as_dict())
# Join them.
return files + user_file
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
        Optic allows the user to specify the paths of the input files,
        hence we don't need to create symbolic links.
"""
def get_results(self, **kwargs):
return super(OpticTask, self).get_results(**kwargs)
def fix_abicritical(self):
"""
Cannot fix abicritical errors for optic
"""
return 0
#@check_spectator
def reset_from_scratch(self):
"""
        Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
"""
# Move output files produced in workdir to _reset otherwise check_status continues
# to see the task as crashed even if the job did not run
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
if not self.queue_errors:
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
return
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
for error in self.queue_errors:
logger.info('fixing: %s' % str(error))
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
                    # Ask the qadapter to provide more resources, i.e. more cpus so more total memory;
                    # if the code scales, this should fix the memory problem.
                    # Increase both max and min ncpus of the autoparal and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
logger.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
                    msg = ('Memory error detected but the memory could not be increased, neither could the\n'
                           'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
                        logger.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
                            logger.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
                    msg = ('Time cancel error detected but the time could not be increased, neither could\n'
                           'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
                           'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def autoparal_run(self):
"""
        Find an optimal set of parameters for the execution of the Optic task.
        This method can change the submission parameters, e.g. the number of CPUs for MPI and OpenMP.
        Returns 0 on success.
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
#optconf = self.find_optconf(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
        self.set_status(self.S_INIT, msg='finished autoparal')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
#os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
            ddk_node: The node that will produce the DDK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node, mpi_procs=1,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
mpi_procs: Number of MPI processes to use.
            inp: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
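        Example:
            A minimal usage sketch (the input object and the DDB path below are illustrative)::

                task = AnaddbTask.temp_shell_task(anaddb_inp, ddb_node="/path/to/out_DDB")
                task.start_and_wait()
                phdos_file = task.open_phdos()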
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
        # This is not very elegant! A possible approach could be to pass self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
        Anaddb allows the user to specify the paths of the input files,
        hence we don't need to create symbolic links.
"""
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
        if not os.path.exists(phbst_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
        if not os.path.exists(phdos_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(AnaddbTask, self).get_results(**kwargs)
return results
|
setten/pymatgen
|
pymatgen/io/abinit/tasks.py
|
Python
|
mit
| 173,421
|
[
"ABINIT",
"NetCDF",
"Wannier90",
"pymatgen"
] |
44efeabdb866a65e2ed055b4066f04c40f56023a258c9b150052add15ed258ce
|
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
from essentia import INFO
namespace = 'highlevel'
dependencies = [ 'lowlevel', 'tempotap', 'beats' ]
def compute(audio, pool, options):
defaultStats=['mean', 'min', 'max', 'var', 'dmean', 'dvar', 'dmean2', 'dvar2', 'value']
aggPool = essentia.PoolAggregator(defaultStats=defaultStats)(pool)
descriptors = aggPool.descriptorNames()
profile = 'music'
INFO('Computing High-Level descriptors...')
if profile == 'music':
# Excitement
excitement(aggPool)
        # Intensity
intensity(aggPool)
INFO('100% done...')
def excitement(pool):
# this describes if a song is exciting or not on 3 levels: 1 (not exciting), 2 or 3 (very exciting)
spectral_centroid_mean = pool.value('lowlevel.spectral_centroid.mean')
tempotap_bpm_value = pool.value('rhythm.bpm.value')
rhythm_beats_loudness_mean = pool.value('rhythm.beats_loudness.mean')
rhythm_onset_rate_value = pool.value('rhythm.onset_rate.value')
# Weka tree J48 calculated with essentia_0.4.0:2875
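    # Illustrative walk through the tree (values are made up): spectral_centroid_mean=2000.0,
    # rhythm_onset_rate_value=5.0 and rhythm_beats_loudness_mean=0.04 take the first branch,
    # then the onset-rate > 4.52 path and the loudness <= 0.0517 leaf, giving excitement = 3.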
if spectral_centroid_mean <= 2254.374756:
if rhythm_onset_rate_value <= 4.521962:
if spectral_centroid_mean <= 1932.181519:
excitement = 1
else:
if rhythm_beats_loudness_mean <= 0.032491:
excitement = 1
else:
excitement = 2
else:
if rhythm_beats_loudness_mean <= 0.051655:
excitement = 3
else:
excitement = 2
else:
if spectral_centroid_mean <= 2477.170654:
excitement = 2
else:
if tempotap_bpm_value <= 128.839981:
if rhythm_beats_loudness_mean <= 0.041298:
excitement = 3
else:
excitement = 2
else:
excitement = 3
pool.add(namespace + '.' + 'excitement', excitement)#, pool.GlobalScope)
def intensity(pool):
# this describes if a song is intense or not: from 0 to 1
tempotap_bpm_value = pool.value('rhythm.bpm.value')
rhythm_onset_rate_value = pool.value('rhythm.onset_rate.value')
rhythm_beats_loudness_mean = pool.value('rhythm.beats_loudness.mean')
rhythm_beats_loudness_bass_mean = pool.value('rhythm.beats_loudness_bass.mean')
intensity = 0
    # this algorithm is based on common sense
# the thresholds were found from essentia_0.4.0:2885
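    # Worked example (made-up values): bpm=130, onset_rate=5.5, beats_loudness=0.25 and
    # beats_loudness_bass=0.45 each add 3 points, so intensity = 12/12.0 = 1.0.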
if tempotap_bpm_value < 100.0:
intensity += 1
else:
if tempotap_bpm_value < 120.0:
intensity += 2
else:
intensity += 3
if rhythm_onset_rate_value < 3.0:
intensity += 1
else:
if rhythm_onset_rate_value < 5.0:
intensity += 2
else:
intensity += 3
if rhythm_beats_loudness_mean < 0.1:
intensity += 1
else:
if rhythm_beats_loudness_mean < 0.2:
intensity += 2
else:
intensity += 3
if rhythm_beats_loudness_bass_mean < 0.2:
intensity += 1
else:
if rhythm_beats_loudness_bass_mean < 0.4:
intensity += 2
else:
intensity += 3
intensity /= 12.0
pool.add(namespace + '.' + 'intensity', intensity)#, pool.GlobalScope)
|
carthach/essentia
|
src/python/essentia/extractor/highlevel.py
|
Python
|
agpl-3.0
| 3,989
|
[
"exciting"
] |
9ef12ecce240175039749bc740f82ad56e582965221897ad0583af3fe2dfc709
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
#Created on 31.03.17
#
#Created for pymepps-streaming
#
#@author: Tobias Sebastian Finn, tobias.sebastian.finn@studium.uni-hamburg.de
#
# Copyright (C) {2017} {Tobias Sebastian Finn}
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# System modules
import logging
import collections
import os
import dateutil.parser
# External modules
from siphon.catalog import TDSCatalog
from siphon.ncss import NCSS
from siphon.ncss import NCSSQuery
# Internal modules
from pymeppsstream.streams.stream import Stream
logger = logging.getLogger(__name__)
class NCSubsetStream(Stream):
def __init__(self, name, path, dataset, download_path='.', **kwargs):
"""
NetCDF subset stream. This stream could be used to download a netcdf
        subset from a thredds server. NCSubsetStream is based on siphon.
Parameters
----------
name : str
The name of the stream. This should be an unique name!
path : str
The url path to the catalog xml of the thredds server.
dataset : str
The netcdf subset is created for this dataset name.
download_path : str, optional
The base path where the files should be downloaded. The download
path is composed with this path and the name of the instance. If the
download path doesn't exist it will be created. Default is the
current working directory.
variables : *str
Specify one or more variables for the query. This query attribute
has to be set.
kwargs :
The kwargs are used for the ncss query. All methods of
siphon.ncss.NCSSQuery are supported.
Example
-------
netcdf_subset = NCSubsetStream(
name='thredds_metno',
path='http://thredds.met.no/thredds/catalog/meps25files/' \
'catalog.xml',
dataset='meps_allmembers_full_2_5km_latest.nc',
            download_path='/scratch/data',
lonlat_point=(10, 53.5),
accept='netcdf',
variables='air_temperature_2m',
all_times=())
netcdf_subset.get_data()
"""
super().__init__(name, path)
self.download_path = download_path
self.ncss = self.get_ncss(path, dataset)
self.query = NCSSQuery()
self.add_to_query(**kwargs)
self.download_path = os.path.join(download_path, self.name)
if not os.path.exists(self.download_path):
os.makedirs(self.download_path)
@staticmethod
def get_ncss(url, dataset):
"""
Returns a netcdf subset service instance.
Parameters
----------
url : str
The url path to the catalog xml of the thredds server.
dataset : str
The netcdf subset is created for this dataset name.
Returns
-------
ncss : siphon.ncss.NCSS
The netcdf subset service instance for the given url and dataset.
"""
cat = TDSCatalog(url)
ds = cat.datasets[dataset]
ncss = NCSS(ds.access_urls['NetcdfSubset'])
return ncss
def add_to_query(self, **kwargs):
"""
Method to add variables to the query.
Parameters
----------
kwargs : dict(str, ), optional
The key of the kwargs is used to call the method of the query.
The values are used as parameter for the method call.
"""
for key in kwargs:
try:
if isinstance(kwargs[key], collections.Iterable) and \
not isinstance(kwargs[key], str):
getattr(self.query, key)(*kwargs[key])
else:
getattr(self.query, key)(kwargs[key])
except AttributeError:
pass
def get_metadata(self):
"""
Method to get the metadata from the netcdf subset service.
Returns
-------
metadata : dict(str, )
A dict containing the metadata for the ncss. The most important
variables are read from the ncss dataset and saved within this dict.
"""
ncss_metadata = self.ncss.metadata
metadata = {
'time': ncss_metadata.time_span,
'latlonbox': ncss_metadata.lat_lon_box,
'variables': ncss_metadata.variables,
}
return metadata
def get_data(self):
"""
        Method to download the netcdf subset to download_path/date.nc. The date is the
start time of the data and has the format %Y%m%d_%H%M.
Returns
-------
file_path : str
            The file is downloaded to this path.
"""
md = self.get_metadata()
start_time = dateutil.parser.parse(md['time']['begin'])
raw_data = self.ncss.get_data_raw(self.query)
file_path = os.path.join(self.download_path,
start_time.strftime('%Y%m%d_%H%M.nc'))
with open(file_path, 'w+b') as nc_file:
nc_file.write(raw_data)
return file_path
|
maestrotf/pymepps-streaming
|
pymeppsstream/streams/ncsubset.py
|
Python
|
gpl-3.0
| 5,787
|
[
"NetCDF"
] |
32c9ed54ccbef432803edddf036e97e16dce1efc0d47fa9149060e5b9a9cd1da
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile, get_profile
from hadoop import pseudo_hdfs4
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import LdapTestConnection, reset_all_groups, reset_all_users
def test_useradmin_ldap_user_group_membership_sync():
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
    # Make an authenticated request as curly so that the synchronization middleware gets called.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
    # Make an authenticated request as curly so that the synchronization middleware gets called.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
def test_useradmin_ldap_suboordinate_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and not members of suboordinate groups (even though specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration():
done = []
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == 'larry@stooges.com')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, 'curly@stooges.com')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users():
done = []
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
# Test regular with spaces (should fail)
response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("There was a problem with some of the LDAP information" in response.content, response)
assert_true("Username must not contain whitespaces" in response.content, response)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
assert_true(c.post(URL))
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/apps/useradmin/src/useradmin/test_ldap_deprecated.py
|
Python
|
gpl-2.0
| 32,107
|
[
"MOE"
] |
d79634870ae201ee0379726156deb36128f1ccdacdd56481eb2043eb859152f7
|
"""
Filename: arma.py
Authors: Doc-Jin Jang, Jerry Choi, Thomas Sargent, John Stachurski
Provides functions for working with and visualizing scalar ARMA processes.
"""
import numpy as np
from numpy import conj, pi
import matplotlib.pyplot as plt
from scipy.signal import dimpulse, freqz, dlsim
# == Ignore unnecessary warnings concerning casting complex variables back to
# floats == #
import warnings
warnings.filterwarnings('ignore')
class ARMA(object):
r"""
This class represents scalar ARMA(p, q) processes.
If phi and theta are scalars, then the model is
understood to be
.. math::
X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1}
where :math:`\epsilon_t` is a white noise process with standard
deviation :math:`\sigma`. If phi and theta are arrays or sequences,
then the interpretation is the ARMA(p, q) model
.. math::
X_t = \phi_1 X_{t-1} + ... + \phi_p X_{t-p} +
\epsilon_t + \theta_1 \epsilon_{t-1} + ... +
\theta_q \epsilon_{t-q}
where
* :math:`\phi = (\phi_1, \phi_2,..., \phi_p)`
* :math:`\theta = (\theta_1, \theta_2,..., \theta_q)`
* :math:`\sigma` is a scalar, the standard deviation of the
white noise
Parameters
----------
phi : scalar or iterable or array_like(float)
Autocorrelation values for the autocorrelated variable.
See above for explanation.
theta : scalar or iterable or array_like(float)
Autocorrelation values for the white noise of the model.
See above for explanation
sigma : scalar(float)
The standard deviation of the white noise
Attributes
----------
phi, theta, sigma : see Parameters
ar_poly : array_like(float)
The polynomial form that is needed by scipy.signal to do the
processing we desire. Corresponds with the phi values
ma_poly : array_like(float)
The polynomial form that is needed by scipy.signal to do the
processing we desire. Corresponds with the theta values
"""
def __init__(self, phi, theta=0, sigma=1):
self._phi, self._theta = phi, theta
self.sigma = sigma
self.set_params()
def __repr__(self):
m = "ARMA(phi=%s, theta=%s, sigma=%s)"
return m % (self.phi, self.theta, self.sigma)
def __str__(self):
m = "An ARMA({p}, {q}) process"
p = np.asarray(self.phi).size
q = np.asarray(self.theta).size
return m.format(p=p, q=q)
# Special latex print method for working in notebook
def _repr_latex_(self):
m = r"$X_t = "
phi = np.atleast_1d(self.phi)
theta = np.atleast_1d(self.theta)
rhs = ""
for (tm, phi_p) in enumerate(phi):
# don't include terms if they are equal to zero
if abs(phi_p) > 1e-12:
rhs += r"%+g X_{t-%i}" % (phi_p, tm+1)
if rhs[0] == "+":
rhs = rhs[1:] # remove initial `+` if phi_1 was positive
rhs += r" + \epsilon_t"
for (tm, th_q) in enumerate(theta):
# don't include terms if they are equal to zero
if abs(th_q) > 1e-12:
rhs += r"%+g \epsilon_{t-%i}" % (th_q, tm+1)
return m + rhs + "$"
@property
def phi(self):
return self._phi
@phi.setter
def phi(self, new_value):
self._phi = new_value
self.set_params()
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, new_value):
self._theta = new_value
self.set_params()
def set_params(self):
r"""
Internally, scipy.signal works with systems of the form
.. math::
ar_{poly}(L) X_t = ma_{poly}(L) \epsilon_t
where L is the lag operator. To match this, we set
.. math::
ar_{poly} = (1, -\phi_1, -\phi_2,..., -\phi_p)
ma_{poly} = (1, \theta_1, \theta_2,..., \theta_q)
In addition, ar_poly must be at least as long as ma_poly.
This can be achieved by padding it out with zeros when required.
"""
# === set up ma_poly === #
ma_poly = np.asarray(self._theta)
self.ma_poly = np.insert(ma_poly, 0, 1) # The array (1, theta)
# === set up ar_poly === #
if np.isscalar(self._phi):
ar_poly = np.array(-self._phi)
else:
ar_poly = -np.asarray(self._phi)
self.ar_poly = np.insert(ar_poly, 0, 1) # The array (1, -phi)
# === pad ar_poly with zeros if required === #
if len(self.ar_poly) < len(self.ma_poly):
temp = np.zeros(len(self.ma_poly) - len(self.ar_poly))
self.ar_poly = np.hstack((self.ar_poly, temp))
def impulse_response(self, impulse_length=30):
"""
Get the impulse response corresponding to our model.
Returns
-------
psi : array_like(float)
psi[j] is the response at lag j of the impulse response.
We take psi[0] as unity.
"""
sys = self.ma_poly, self.ar_poly, 1
times, psi = dimpulse(sys, n=impulse_length)
psi = psi[0].flatten() # Simplify return value into flat array
return psi
def spectral_density(self, two_pi=True, res=1200):
r"""
Compute the spectral density function. The spectral density is
the discrete time Fourier transform of the autocovariance
function. In particular,
.. math::
f(w) = \sum_k \gamma(k) exp(-ikw)
where gamma is the autocovariance function and the sum is over
the set of all integers.
Parameters
----------
two_pi : Boolean, optional
Compute the spectral density function over [0, pi] if
two_pi is False and [0, 2 pi] otherwise. Default value is
True
res : scalar or array_like(int), optional(default=1200)
If res is a scalar then the spectral density is computed at
`res` frequencies evenly spaced around the unit circle, but
if res is an array then the function computes the response
at the frequencies given by the array
Returns
-------
w : array_like(float)
The normalized frequencies at which h was computed, in
radians/sample
spect : array_like(float)
The frequency response
"""
w, h = freqz(self.ma_poly, self.ar_poly, worN=res, whole=two_pi)
spect = h * conj(h) * self.sigma**2
return w, spect
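# Note on the computation above (a restatement, not additional behaviour):
# freqz evaluates H(w) = ma_poly(e^{-iw}) / ar_poly(e^{-iw}) around the unit
# circle, so spect = H(w) * conj(H(w)) * sigma**2 = |H(w)|**2 * sigma**2,
# the spectral density of the ARMA process defined by those two polynomials.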
def autocovariance(self, num_autocov=16):
"""
Compute the autocovariance function from the ARMA parameters
over the integers range(num_autocov) using the spectral density
and the inverse Fourier transform.
Parameters
----------
num_autocov : scalar(int), optional(default=16)
The number of autocovariances to calculate
"""
spect = self.spectral_density()[1]
acov = np.fft.ifft(spect).real
# num_autocov should be <= len(acov) / 2
return acov[:num_autocov]
def simulation(self, ts_length=90):
"""
Compute a simulated sample path assuming Gaussian shocks.
Parameters
----------
ts_length : scalar(int), optional(default=90)
Number of periods to simulate for
Returns
-------
vals : array_like(float)
A simulation of the model that corresponds to this class
"""
sys = self.ma_poly, self.ar_poly, 1
u = np.random.randn(ts_length, 1) * self.sigma
vals = dlsim(sys, u)[1]
return vals.flatten()
def plot_impulse_response(self, ax=None, show=True):
if show:
fig, ax = plt.subplots()
ax.set_title('Impulse response')
yi = self.impulse_response()
ax.stem(list(range(len(yi))), yi)
ax.set_xlim(xmin=(-0.5))
ax.set_ylim(min(yi)-0.1, max(yi)+0.1)
ax.set_xlabel('time')
ax.set_ylabel('response')
if show:
plt.show()
def plot_spectral_density(self, ax=None, show=True):
if show:
fig, ax = plt.subplots()
ax.set_title('Spectral density')
w, spect = self.spectral_density(two_pi=False)
ax.semilogy(w, spect)
ax.set_xlim(0, pi)
ax.set_ylim(0, np.max(spect))
ax.set_xlabel('frequency')
ax.set_ylabel('spectrum')
if show:
plt.show()
def plot_autocovariance(self, ax=None, show=True):
if show:
fig, ax = plt.subplots()
ax.set_title('Autocovariance')
acov = self.autocovariance()
ax.stem(list(range(len(acov))), acov)
ax.set_xlim(-0.5, len(acov) - 0.5)
ax.set_xlabel('time')
ax.set_ylabel('autocovariance')
if show:
plt.show()
def plot_simulation(self, ax=None, show=True):
if show:
fig, ax = plt.subplots()
ax.set_title('Sample path')
x_out = self.simulation()
ax.plot(x_out)
ax.set_xlabel('time')
ax.set_ylabel('state space')
if show:
plt.show()
def quad_plot(self):
"""
Plots the impulse response, spectral_density, autocovariance,
and one realization of the process.
"""
num_rows, num_cols = 2, 2
fig, axes = plt.subplots(num_rows, num_cols, figsize=(12, 8))
plt.subplots_adjust(hspace=0.4)
plot_functions = [self.plot_impulse_response,
self.plot_spectral_density,
self.plot_autocovariance,
self.plot_simulation]
for plot_func, ax in zip(plot_functions, axes.flatten()):
plot_func(ax, show=False)
plt.show()
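# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The parameter
# values below are arbitrary illustrations chosen only to exercise the
# public API defined above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # ARMA(1, 1) process: X_t = 0.5 X_{t-1} + eps_t + 0.2 eps_{t-1}
    lp = ARMA(phi=0.5, theta=0.2, sigma=1.0)
    print(lp)
    print(lp.impulse_response(impulse_length=5))   # first impulse-response coefficients
    print(lp.autocovariance(num_autocov=5))        # first autocovariances
    print(lp.simulation(ts_length=10))             # a short simulated sample path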
|
gxxjjj/QuantEcon.py
|
quantecon/arma.py
|
Python
|
bsd-3-clause
| 9,906
|
[
"Gaussian"
] |
9cbbc52eae5ed8041e73d4eee98b7266c4a987cc0919efeb838408c00775ddc4
|
from ase import *
from gpaw import GPAW
from gpaw.utilities import equal
a = 5.0
H = Atoms('H', [(a/2, a/2, a/2)], magmoms=[1],
pbc=False,
cell=(a, a, a))
H.set_calculator(GPAW(h=0.1, setups='ae', fixmom=True))
e1 = H.get_potential_energy()
c = a / 2.0
d = 0.74
s = d / 2 / 3**0.5
H2 = Atoms('H2',
[(c - s, c - s, c - s),
(c + s, c + s, c + s)],
pbc=False,
cell=(a, a, a))
H2.set_calculator(GPAW(h=0.1, setups='ae'))
e2 = H2.get_potential_energy()
print e1, e2, 2 * e1 - e2
equal(2 * e1 - e2, 4.55354238957, 1e-5)
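# The quantity checked above, 2 * e1 - e2, is the atomization energy of H2
# from these two all-electron (setups='ae') calculations; the hard-coded
# reference value is simply the number this regression test expects.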
|
qsnake/gpaw
|
oldtest/ae-calculation.py
|
Python
|
gpl-3.0
| 584
|
[
"ASE",
"GPAW"
] |
f085f4d92fec6dc6dc57bbab29d8082a10e8b47b927fe34b88b86c09ee43812f
|
from unittest import TestCase
import brainrnaseq as brs
import pandas as pd
class BrainRNASeqTest(TestCase):
def test_mapping_data(self):
items = {
'Homo sapiens': ['A1BG', 'A1B|ABG|GAB|HYST2477', '16S rRNA', '-'],
'Mus musculus': ['Pzp', 'A1m|A2m|AI893533|MAM', 'ND2', '-'],
}
for species, vals in items.items():
for _ in range(2):
map = brs.cache.get_mapping_data(
species=species,
)
self.assertIsInstance(
map,
pd.DataFrame,
)
self.assertEqual(
map.index[0],
vals[0],
)
self.assertEqual(
map.iloc[0]['Synonyms'],
vals[1],
)
self.assertEqual(
map.index[-1],
vals[2],
)
self.assertEqual(
map.iloc[-1]['Synonyms'],
vals[3],
)
map = brs.cache.get_mapping_data(
species=species,
force=True,
)
self.assertIsInstance(
map,
pd.DataFrame,
)
def test_mapping(self):
for _ in range(2):
for gene in ['Jak2', 'Fd17']:
self.assertEqual(
brs.mapping.get_symbol_mapping(
gene=gene,
species='Mus musculus',
),
'Jak2',
)
self.assertEqual(
brs.mapping.get_symbol_mapping(
gene='NaNNaNNaN',
species='Mus musculus',
),
None,
)
for gene in ['JAK2', 'JTK10', 'THCYT3']:
self.assertEqual(
brs.mapping.get_symbol_mapping(
gene=gene,
species='Homo sapiens',
),
'JAK2',
)
# Ambiguous gene
self.assertEqual(
brs.mapping.get_symbol_mapping(
gene='AP-1',
species='Homo sapiens',
),
'FOS',
)
def test_barres_seq_data(self):
brs.cache.get_barres_seq_data()
items = {
'Homo sapiens': ['Gene', '1/2-SBSRNA4', 'ZZZ3'],
'Mus musculus': ['gene', '0610005C13Rik', 'Zzz3'],
}
for species, (col, first, last) in items.items():
self.assertEqual(
brs.cache.BARRES_SPECIES_DATA[species].iloc[0][col],
first,
)
self.assertEqual(
brs.cache.BARRES_SPECIES_DATA[species].iloc[-1][col],
last,
)
def test_hansen_seq_data(self):
brs.cache.get_hansen_seq_data()
items = {
'Homo sapiens': ['A1BG', 'SCO2'],
'Mus musculus': ['A1bg', 'Sco2'],
}
for species, (first, last) in items.items():
self.assertEqual(
brs.cache.HANSEN_SPECIES_DATA[species].index[0],
first,
)
self.assertEqual(
brs.cache.HANSEN_SPECIES_DATA[species].index[-1],
last,
)
def test_barres_enrichment_table(self):
tab = brs.enrichments.build_barres_table()
self.assertIsInstance(
tab,
dict,
)
def test_hansen_enrichment_table(self):
tab = brs.enrichments.build_hansen_table()
self.assertIsInstance(
tab,
dict,
)
def test_barres_enrichments(self):
items = {
'Homo sapiens': {
'AGT': 'Astrocyte',
'GFAP': 'Astrocyte',
'IDI2-AS1': 'Astrocyte',
'CD34': 'Endothelia',
'RNU11': 'Endothelia',
'CCL3L1': 'Microglia',
'CD33': 'Microglia',
'INPP5D': 'Microglia',
'SIGLEC5': 'Microglia',
'SIGLEC8': 'Microglia',
'SIGLEC9': 'Microglia',
'SIGLEC10': 'Microglia',
'SIGLEC14': 'Microglia',
'FOLH1': 'Myelinating Oligodendrocytes',
'CD22': 'Myelinating Oligodendrocytes',
'MAG': 'Myelinating Oligodendrocytes',
'GAD2': 'Neuron',
'SYT4': 'Neuron',
},
'Mus musculus': {
'Sumo2': 'Astrocyte',
'AU021092': 'Endothelia',
'OncoM': 'Microglia',
'Siglec-1': 'Microglia',
'Siglec1': 'Microglia',
'Cd22': 'Microglia',
'Cd33': 'Microglia',
'Siglec-3': 'Microglia',
'mSiglec-E': 'Microglia',
'Siglece': 'Microglia',
'Siglec5': 'Microglia',
'Siglec9': 'Microglia',
'Siglec-H': 'Microglia',
'Siglecl1': 'Microglia',
'Siglec12': 'Microglia',
'siglec-4a': 'Myelinating Oligodendrocytes',
'Otm': 'Myelinating Oligodendrocytes',
'Reln': 'Neuron',
},
}
for species, vals in items.items():
enrich = brs.enrichments.get_enrichments(
species=species,
backend='Barres',
)
self.assertIsInstance(
enrich,
dict,
)
for key, val in vals.items():
if key not in enrich:
print(key)
continue
self.assertEqual(
enrich[key],
val,
)
def test_hansen_enrichments(self):
items = {
'Homo sapiens': {
'AGT': 'Astrocyte',
'CD34': 'Endothelia',
'FOLH1': 'Myelinating Oligodendrocytes',
'GAD2': 'Neuron',
'SYT4': 'Neuron',
},
'Mus musculus': {
'AU021092': 'Endothelia',
'OncoM': 'Microglia',
'mSiglec-E': 'Microglia',
'Siglece': 'Microglia',
'Siglec5': 'Microglia',
'Siglec9': 'Microglia',
'Siglecl1': 'Microglia',
'Siglec12': 'Microglia',
'siglec-4a': 'Myelinating Oligodendrocytes',
'Otm': 'Myelinating Oligodendrocytes',
'Reln': 'Neuron',
},
}
for species, vals in items.items():
enrich = brs.enrichments.get_enrichments(
species=species,
backend='Hansen',
)
self.assertIsInstance(
enrich,
dict,
)
for key, val in vals.items():
if key not in enrich:
print(key)
continue
self.assertEqual(
enrich[key],
val,
)
|
white-lab/pyproteome
|
tests/brainrnaseq_test.py
|
Python
|
bsd-2-clause
| 7,370
|
[
"NEURON"
] |
bc11507dd6b77da6c7a0663a45b421b57d590d0ce5b87535e8b3bab92c072cc8
|
"""
Tests for user authorization password-related functionality.
"""
import json
import logging
import re
from datetime import datetime, timedelta
from unittest.mock import Mock, patch
import pytest
import ddt
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.core.cache import cache
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from freezegun import freeze_time
from oauth2_provider.models import AccessToken as dot_access_token
from oauth2_provider.models import RefreshToken as dot_refresh_token
from pytz import UTC
from testfixtures import LogCapture
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_api.accounts.tests.test_api import CreateAccountMixin
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserNotFound
from openedx.core.djangoapps.user_authn.views.password_reset import request_password_change
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
LOGGER_NAME = 'audit'
User = get_user_model() # pylint:disable=invalid-name
class TestRequestPasswordChange(CreateAccountMixin, TestCase):
"""
Tests for users who request a password change.
"""
USERNAME = 'claire-underwood'
PASSWORD = 'ṕáśśẃőŕd'
EMAIL = 'claire+underwood@example.com'
IS_SECURE = False
@skip_unless_lms
def test_request_password_change(self):
# Create and activate an account
self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
assert len(mail.outbox) == 1
request = RequestFactory().post('/password')
request.user = Mock()
request.site = SiteFactory()
with patch('crum.get_current_request', return_value=request):
# Request a password change
request_password_change(self.EMAIL, self.IS_SECURE)
# Verify that a new email message has been sent
assert len(mail.outbox) == 2
# Verify that the body of the message contains something that looks
# like an activation link
email_body = mail.outbox[0].body
result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
assert result is not None
@skip_unless_lms
def test_request_password_change_invalid_user(self):
with pytest.raises(UserNotFound):
request_password_change(self.EMAIL, self.IS_SECURE)
# Verify that no email messages have been sent
assert len(mail.outbox) == 0
@skip_unless_lms
def test_request_password_change_inactive_user(self):
# Create an account, but do not activate it
self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
assert len(mail.outbox) == 1
request = RequestFactory().post('/password')
request.user = Mock()
request.site = SiteFactory()
with patch('crum.get_current_request', return_value=request):
request_password_change(self.EMAIL, self.IS_SECURE)
# Verify that the password change email was still sent
assert len(mail.outbox) == 2
@skip_unless_lms
@ddt.ddt
class TestPasswordChange(CreateAccountMixin, CacheIsolationTestCase):
""" Tests for views that change the user's password. """
USERNAME = "heisenberg"
ALTERNATE_USERNAME = "walt"
OLD_PASSWORD = "ḅḷüëṡḳÿ"
NEW_PASSWORD = "B🄸🄶B🄻🅄🄴"
OLD_EMAIL = "walter@graymattertech.com"
NEW_EMAIL = "walt@savewalterwhite.com"
INVALID_KEY = "123abc"
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
assert result
mail.outbox = []
cache.clear()
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
assert response.status_code == 200
# Check that an email was sent
assert len(mail.outbox) == 1
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
assert result is not None
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
assert response.status_code == 302
# Visit the redirect link
_ = self.client.get(response.url)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
response.url,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
assert response.status_code == 200
self.assertContains(response, "Your password has been reset.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
login_api_url = reverse('login_api')
response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
assert response.status_code == 200
response_dict = json.loads(response.content.decode('utf-8'))
assert response_dict['success']
# Try reusing the activation link to change the password again
# Visit the activation link again.
response = self.client.get(activation_link)
assert response.status_code == 200
self.assertContains(response, "This password reset link is invalid. It may have been used already.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
assert not result
# Verify that the new password continues to be valid
response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
assert response.status_code == 200
response_dict = json.loads(response.content.decode('utf-8'))
assert response_dict['success']
def test_password_change_failure(self):
with patch(
'openedx.core.djangoapps.user_authn.views.password_reset.request_password_change',
side_effect=UserAPIInternalError,
):
self._change_password()
self.assertRaises(UserAPIInternalError)
@patch.dict(settings.FEATURES, {'ENABLE_PASSWORD_RESET_FAILURE_EMAIL': True})
def test_password_reset_failure_email(self):
"""Test that a password reset failure email notification is sent, when enabled."""
# Log the user out
self.client.logout()
bad_email = 'doesnotexist@example.com'
response = self._change_password(email=bad_email)
assert response.status_code == 200
# Check that an email was sent
assert len(mail.outbox) == 1
# Verify that the body contains the failed password reset message
sent_message = mail.outbox[0]
text_body = sent_message.body
html_body = sent_message.alternatives[0][0]
for email_body in [text_body, html_body]:
msg = 'However, there is currently no user account associated with your email address: {email}'.format(
email=bad_email
)
assert f'reset for your user account at {settings.PLATFORM_NAME}' in email_body
assert 'password_reset_confirm' not in email_body, 'The link should not be added if user was not found'
assert msg in email_body
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
assert response.status_code == 200
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
assert response.status_code == 400
def test_access_token_invalidation_logged_out(self):
self.client.logout()
user = User.objects.get(email=self.OLD_EMAIL)
self._create_dot_tokens(user)
response = self._change_password(email=self.OLD_EMAIL)
assert response.status_code == 200
self._assert_access_token_destroyed(user)
def test_access_token_invalidation_logged_in(self):
user = User.objects.get(email=self.OLD_EMAIL)
self._create_dot_tokens(user)
response = self._change_password()
assert response.status_code == 200
self._assert_access_token_destroyed(user)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
self.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
mail.outbox = []
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
assert response.status_code == 200
assert len(mail.outbox) == 1
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
assert response.status_code == 200
expected_logs = (
(LOGGER_NAME, 'INFO', f'Password reset initiated for email {self.NEW_EMAIL}.'),
(LOGGER_NAME, 'INFO', 'Invalid password reset attempt')
)
logger.check(*expected_logs)
def test_password_change_rate_limited(self):
"""
Tests that password reset requests are rate limited as expected.
"""
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
for status in [200, 403]:
response = self._change_password(email=self.NEW_EMAIL)
assert response.status_code == status
# Now move the clock one minute into the future and change the email to
# verify that another request from the same IP is allowed
reset_time = datetime.now(UTC) + timedelta(seconds=61)
with freeze_time(reset_time):
response = self._change_password(email=self.OLD_EMAIL)
assert response.status_code == 200
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
assert response.status_code == 405
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
def _create_dot_tokens(self, user=None):
"""Create dot access token for given user if user provided else for default user."""
if not user:
user = User.objects.get(email=self.OLD_EMAIL)
application = dot_factories.ApplicationFactory(user=user)
access_token = dot_factories.AccessTokenFactory(user=user, application=application)
dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)
def _assert_access_token_destroyed(self, user):
"""Assert all access tokens are destroyed."""
assert not dot_access_token.objects.filter(user=user).exists()
assert not dot_refresh_token.objects.filter(user=user).exists()
|
EDUlib/edx-platform
|
openedx/core/djangoapps/user_authn/views/tests/test_password.py
|
Python
|
agpl-3.0
| 12,929
|
[
"VisIt"
] |
2a9e1e8f50e00251014b77ab0b7b5e09eba5b9aa9ef206bad8b3725a63fee2d7
|
import pkg_resources
import string
import random
import os
from biomaj.mongo_connector import MongoConnector
from biomaj_core.config import BiomajConfig
from biomaj_core.utils import Utils
import logging
class SchemaVersion(object):
"""
BioMAJ database schema version. This module can be used to apply schema modifications needed during an
incremental software upgrade.
"""
@staticmethod
def migrate_pendings():
"""
Migrate database
3.0.18: Check the actual BioMAJ version and if older than 3.0.17, do the 'pending' key migration
"""
if BiomajConfig.global_config is None:
try:
BiomajConfig.load_config()
except Exception as err:
print("* SchemaVersion: Can't find config file: " + str(err))
return None
if MongoConnector.db is None:
MongoConnector(BiomajConfig.global_config.get('GENERAL', 'db.url'),
BiomajConfig.global_config.get('GENERAL', 'db.name'))
schema = MongoConnector.db_schema
banks = MongoConnector.banks
users = MongoConnector.users
schema_version = schema.find_one({'id': 1})
installed_version = pkg_resources.get_distribution("biomaj").version
if schema_version is None:
schema_version = {'id': 1, 'version': '3.0.0'}
schema.insert(schema_version)
moderate = int(schema_version['version'].split('.')[1])
minor = int(schema_version['version'].split('.')[2])
if moderate == 0 and minor <= 17:
print("Migrate from release: %s" % schema_version['version'])
# Update pending releases
bank_list = banks.find()
updated = 0
for bank in bank_list:
if 'pending' in bank:
# Check we have an old pending type
if type(bank['pending']) == dict:
updated += 1
pendings = []
for release in sorted(bank['pending'], key=lambda r: bank['pending'][r]):
pendings.append({'release': str(release), 'id': bank['pending'][str(release)]})
if len(pendings) > 0:
banks.update({'name': bank['name']},
{'$set': {'pending': pendings}})
else:
# We remove old type for 'pending'
banks.update({'name': bank['name']},
{'$unset': {'pending': ""}})
print("Migration: %d bank(s) updated" % updated)
if moderate < 1:
updated = 0
user_list = users.find()
for user in user_list:
if 'apikey' not in user:
updated += 1
api_key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10))
users.update({'_id': user['_id']}, {'$set': {'apikey': api_key}})
print("Migration: %d user(s) updated" % updated)
# production size
bank_list = banks.find()
updated = 0
for bank in bank_list:
for prod in bank['production']:
'''
{ "_id" : ObjectId("54edb10856e8bb11340b5f51"), "production" : [
{ "freeze" : false, "remoterelease" : "2003-11-26", "session" : 1427809848.560108,
"data_dir" : "/db", "formats" : [ ], "release" : "2003-11-26",
"dir_version" : "ncbi/blast/alu",
"prod_dir" : "alu-2003-11-26", "types" : [ ], "size" : 319432 } ] }
'''
if 'size' not in prod or prod['size'] == 0:
logging.info('Calculate size for bank %s' % (bank['name']))
if 'data_dir' not in prod or not prod['data_dir'] or 'prod_dir' not in prod or not prod['prod_dir'] or 'dir_version' not in prod or not prod['dir_version']:
logging.warn('no production directory information for %s, skipping...' % (bank['name']))
continue
prod_dir = os.path.join(prod['data_dir'], prod['dir_version'], prod['prod_dir'])
if not os.path.exists(prod_dir):
logging.warn('production directory %s does not exists for %s, skipping...' % (prod_dir, bank['name']))
continue
dir_size = Utils.get_folder_size(prod_dir)
banks.update({'name': bank['name'], 'production.release': prod['release']}, {'$set': {'production.$.size': dir_size}})
updated += 1
print("Migration: %d bank production info updated" % updated)
schema.update_one({'id': 1}, {'$set': {'version': installed_version}})
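# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). migrate_pendings()
# is a static method, so a one-off maintenance run only needs the call below;
# it assumes the BioMAJ global configuration can be located by
# BiomajConfig.load_config() and that the MongoDB instance it points to is
# reachable, as the code above requires.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    SchemaVersion.migrate_pendings()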
|
horkko/biomaj
|
biomaj/schema_version.py
|
Python
|
agpl-3.0
| 5,031
|
[
"BLAST"
] |
a7a54b3673633dfc290007269ed17e587d17a708e931640e6c6bd0128b7ed7cb
|
# Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for `iris_grib.message.Section`.
"""
# Import iris_grib.tests first so that some things can be initialised before
# importing anything else.
import iris_grib.tests as tests
import gribapi
import numpy as np
from iris_grib.message import Section
@tests.skip_data
class Test___getitem__(tests.IrisGribTest):
def setUp(self):
filename = tests.get_data_path(('GRIB', 'uk_t', 'uk_t.grib2'))
with open(filename, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
def test_scalar(self):
section = Section(self.grib_id, None, ['Ni'])
self.assertEqual(section['Ni'], 47)
def test_array(self):
section = Section(self.grib_id, None, ['codedValues'])
codedValues = section['codedValues']
self.assertEqual(codedValues.shape, (1551,))
self.assertArrayAlmostEqual(codedValues[:3],
[-1.78140259, -1.53140259, -1.28140259])
def test_typeOfFirstFixedSurface(self):
section = Section(self.grib_id, None, ['typeOfFirstFixedSurface'])
self.assertEqual(section['typeOfFirstFixedSurface'], 100)
def test_numberOfSection(self):
n = 4
section = Section(self.grib_id, n, ['numberOfSection'])
self.assertEqual(section['numberOfSection'], n)
def test_invalid(self):
section = Section(self.grib_id, None, ['Ni'])
with self.assertRaisesRegex(KeyError, 'Nii'):
section['Nii']
@tests.skip_data
class Test__getitem___pdt_31(tests.IrisGribTest):
def setUp(self):
filename = tests.get_data_path(('GRIB', 'umukv', 'ukv_chan9.grib2'))
with open(filename, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
self.keys = ['satelliteSeries', 'satelliteNumber', 'instrumentType',
'scaleFactorOfCentralWaveNumber',
'scaledValueOfCentralWaveNumber']
def test_array(self):
section = Section(self.grib_id, None, self.keys)
for key in self.keys:
value = section[key]
self.assertIsInstance(value, np.ndarray)
self.assertEqual(value.shape, (1,))
@tests.skip_data
class Test_get_computed_key(tests.IrisGribTest):
def test_gdt40_computed(self):
fname = tests.get_data_path(('GRIB', 'gaussian', 'regular_gg.grib2'))
with open(fname, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
section = Section(self.grib_id, None, [])
latitudes = section.get_computed_key('latitudes')
self.assertTrue(88.55 < latitudes[0] < 88.59)
if __name__ == '__main__':
tests.main()
|
SciTools/iris-grib
|
iris_grib/tests/unit/message/test_Section.py
|
Python
|
lgpl-3.0
| 2,906
|
[
"Gaussian"
] |
14e4fd90b5cb143a0bc5c58a797449a2348dbbac87fcd04d800d50503a930874
|
from chemfiles import Trajectory, UnitCell, Atom, Topology, Frame, Selection
name = "nt12.opt"
a = 24.2
# Read the frame
frame = Trajectory(name+".xyz").read()
# Set the topology
topo = Trajectory("topology.pdb").read()
frame.set_topology(topo.topology())
# Get the positions
positions = frame.positions()
# Set the cell
cell = UnitCell(a, a, 42.43, 90, 90, 120)
frame.set_cell(cell)
# Select the framework atoms: Al, Obr, Si plus the external hydroxyls (internal OH excluded)
#selection = Selection("atoms: name Al or name Obr or name Si")
selection = Selection("atoms: name Hext or name Oext or name Al or name Obr or name Si")
framework = selection.evaluate(frame)
with open(name+".cris",'w') as cris:
with open(name+".slice.xyz",'w') as slic:
with open(name+".framework.xyz",'w') as out:
cris.write(" .false.\n")
cris.write('{} {} 8.486 90.0 90.0 120.0\n'.format(a, a))
cris.write('{}\n'.format(len(framework)/5))
slic.write('{}\n\n'.format(len(framework)/5))
out.write('{}\n\n'.format(len(framework)))
for (ind,i) in enumerate(framework):
atom = frame.atom(i)
if atom.name() == "Al":
atom.set_charge(1.5750)
num = 1
if atom.name() == "Obr":
atom.set_charge(-1.0500)
num = 2
if atom.name() == "Oext":
atom.set_charge(-0.9500)
num = 3
if atom.name() == "Hext":
atom.set_charge(0.4250)
num = 4
if atom.name() == "Si":
atom.set_charge(2.1000)
num = 5
out.write('{}\t'.format(atom.name()))
out.write('{:8.5f}\t'.format(positions[i][0]))
out.write('{:8.5f}\t'.format(positions[i][1]))
out.write('{:8.5f}\n'.format(positions[i][2]))
if (ind%5 == 0):
cris.write('{:d}\t'.format(num))
cris.write('{:8.5f}\t'.format(positions[i][0]))
cris.write('{:8.5f}\t'.format(positions[i][1]))
cris.write('{:8.5f}\t'.format(positions[i][2]))
cris.write('{:5.4f}\t'.format(atom.charge()))
cris.write('{}\n'.format(atom.name()))
slic.write('{}\t'.format(atom.name()))
slic.write('{:8.5f}\t'.format(positions[i][0]))
slic.write('{:8.5f}\t'.format(positions[i][1]))
slic.write('{:8.5f}\n'.format(positions[i][2]))
# Select all
selection = Selection("all")
nt = selection.evaluate(frame)
for i in nt:
positions[i][0] += a/4
positions[i][1] += a*0.4330127019
positions[i][2] += 21.215
with Trajectory(name+".gibbs.xyz",'w') as gibbs:
gibbs.write(frame)
# Select SiOH groups
selection = Selection("angles: name(#1) Si and name(#2) Oint and name(#3) Hint")
sioh_groups = selection.evaluate(frame)
print("{} SiOH groups found".format(len(sioh_groups)))
with open("sioh.coord",'w') as sioh_file:
sioh_file.write('{}\n'.format(len(sioh_groups)))
for (si, o, h) in sioh_groups:
sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[si][0], positions[si][1], positions[si][2]))
sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[o][0], positions[o][1], positions[o][2]))
sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[h][0], positions[h][1], positions[h][2]))
#sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[si][0], positions[si][1], positions[si][2]))
sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[o][0], positions[o][1], positions[o][2]))
sioh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[h][0], positions[h][1], positions[h][2]))
sioh_file.write("END\n")
with open(name+".sioh.xyz",'w') as sioh_file:
sioh_file.write('{}\n\n'.format(3*len(sioh_groups)))
for (si, o, h) in sioh_groups:
sioh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(si).type(), positions[si][0], positions[si][1], positions[si][2]))
sioh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(o).type(), positions[o][0], positions[o][1], positions[o][2]))
sioh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(h).type(), positions[h][0], positions[h][1], positions[h][2]))
# Select Al2OH groups
selection = Selection("angles: name(#1) Al and name(#2) Oext and name(#3) Hext")
aloh_groups = selection.evaluate(frame)
selection = Selection("atoms: name Hext")
hext = selection.evaluate(frame)
print("{} AlOH groups found".format(len(aloh_groups)))
with open("aloh.coord",'w') as aloh_file:
aloh_file.write('{:d}\n'.format(len(aloh_groups)/2))
for h in hext:
groups = filter(lambda u: u[2] == h, aloh_groups)
assert(len(groups) == 2)
assert(groups[0][1] == groups[1][1])
al1 = groups[0][0]
al2 = groups[1][0]
o = groups[0][1]
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[al1][0], positions[al1][1], positions[al1][2]))
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[o][0], positions[o][1], positions[o][2]))
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[al2][0], positions[al2][1], positions[al2][2]))
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[h][0], positions[h][1], positions[h][2]))
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[o][0], positions[o][1], positions[o][2]))
aloh_file.write('{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(positions[h][0], positions[h][1], positions[h][2]))
aloh_file.write("END\n")
with open(name+".aloh.xyz",'w') as aloh_file:
aloh_file.write('{:d}\n\n'.format(4*len(aloh_groups)/2))
for h in hext:
groups = filter(lambda u: u[2] == h, aloh_groups)
assert(len(groups) == 2)
assert(groups[0][1] == groups[1][1])
al1 = groups[0][0]
al2 = groups[1][0]
o = groups[0][1]
aloh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(al1).type(), positions[al1][0], positions[al1][1], positions[al1][2]))
aloh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(o).type(), positions[o][0], positions[o][1], positions[o][2]))
aloh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(al2).type(), positions[al2][0], positions[al2][1], positions[al2][2]))
aloh_file.write('{}\t{:8.5f}\t{:8.5f}\t{:8.5f}\n'.format(frame.atom(h).type(), positions[h][0], positions[h][1], positions[h][2]))
|
lscalfi/imogolite
|
2-Isotherms/0-UnitCell/create.py
|
Python
|
mit
| 6,723
|
[
"Chemfiles"
] |
50972aa5ca094fcb726f1ccb60b1b517c07c18676414fddd2bc35f32d507e807
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
from pyscf import gto, scf
from pyscf.prop import efg
class KnownValues(unittest.TestCase):
def test_dhf_nr_limit(self):
mol = gto.M(atom='''
H .8 0. 0.
H 0. .5 0.''',
basis='ccpvdz')
with lib.temporary_env(lib.param, LIGHT_SPEED=5000):
r = scf.DHF(mol).run().EFG()
nr = scf.RHF(mol).run().EFG()
self.assertAlmostEqual(abs(r - nr).max(), 0, 7)
if __name__ == "__main__":
print("Full Tests for DHF EFGs")
unittest.main()
|
gkc1000/pyscf
|
pyscf/prop/efg/test/test_dhf.py
|
Python
|
apache-2.0
| 1,224
|
[
"PySCF"
] |
e81725adcb89aa5c158200db0560fe81c128d87da20bc6ef16f7ac9b050c011a
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""User-API for the pyemma.coordinates package
.. currentmodule:: pyemma.coordinates.api
"""
from pyemma.util.annotators import deprecated
from pyemma.util.log import getLogger as _getLogger
from pyemma.util import types as _types
from pyemma.coordinates.pipelines import Discretizer as _Discretizer
from pyemma.coordinates.pipelines import Pipeline as _Pipeline
# io
from pyemma.coordinates.data.featurizer import MDFeaturizer as _MDFeaturizer
from pyemma.coordinates.data.feature_reader import FeatureReader as _FeatureReader
from pyemma.coordinates.data.data_in_memory import DataInMemory as _DataInMemory
from pyemma.coordinates.data.util.reader_utils import create_file_reader as _create_file_reader, \
preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top, \
copy_traj_attributes as _copy_traj_attributes
from pyemma.coordinates.data.frames_from_file import frames_from_file as _frames_from_file
# transforms
from pyemma.coordinates.transform.transformer import Transformer as _Transformer
from pyemma.coordinates.transform.pca import PCA as _PCA
from pyemma.coordinates.transform.tica import TICA as _TICA
# clustering
from pyemma.coordinates.clustering.kmeans import KmeansClustering as _KmeansClustering
from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering as _UniformTimeClustering
from pyemma.coordinates.clustering.regspace import RegularSpaceClustering as _RegularSpaceClustering
from pyemma.coordinates.clustering.assign import AssignCenters as _AssignCenters
# stat
from pyemma.coordinates.util.stat import histogram
# types
from mdtraj import Topology as _Topology, Trajectory as _Trajectory
import numpy as _np
import itertools as _itertools
_logger = _getLogger('coordinates.api')
__docformat__ = "restructuredtext en"
__author__ = "Frank Noe, Martin Scherer"
__copyright__ = "Copyright 2015, Computational Molecular Biology Group, FU-Berlin"
__credits__ = ["Benjamin Trendelkamp-Schroer", "Martin Scherer", "Frank Noe"]
__license__ = "FreeBSD"
__maintainer__ = "Martin Scherer"
__email__ = "m.scherer AT fu-berlin DOT de"
__all__ = ['featurizer', # IO
'load',
'source',
'histogram',
'pipeline',
'discretizer',
'save_traj',
'save_trajs',
'pca', # transform
'tica',
'cluster_regspace', # cluster
'cluster_kmeans',
'cluster_uniform_time',
'assign_to_centers',
'feature_reader', # deprecated:
'memory_reader',
'kmeans',
'regspace',
'assign_centers',
'uniform_time']
# ==============================================================================
#
# DATA PROCESSING
#
# ==============================================================================
def featurizer(topfile):
r""" Featurizer to select features from MD data.
Parameters
----------
topfile : str
path to topology file (e.g pdb file)
Returns
-------
feat : :class:`Featurizer <pyemma.coordinates.data.featurizer.MDFeaturizer>`
See also
--------
data.MDFeaturizer
Examples
--------
Create a featurizer and add backbone torsion angles to active features.
Then use it in :func:`source`
>>> import pyemma.coordinates # doctest: +SKIP
>>> feat = pyemma.coordinates.featurizer('my_protein.pdb') # doctest: +SKIP
>>> feat.add_backbone_torsions() # doctest: +SKIP
>>> reader = pyemma.coordinates.source(["my_traj01.xtc", "my_traj02.xtc"], features=feat) # doctest: +SKIP
.. autoclass:: pyemma.coordinates.data.featurizer.MDFeaturizer
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.data.featurizer.MDFeaturizer
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.data.featurizer.MDFeaturizer
:attributes:
"""
return _MDFeaturizer(topfile)
# TODO: DOC - which topology file formats does mdtraj support? Find out and complete docstring
def load(trajfiles, features=None, top=None, stride=1, chunk_size=100):
r""" Loads coordinate features into memory.
    If your memory is not big enough, consider the use of **pipeline**, or use the stride option to subsample the data.
Parameters
----------
trajfiles : str or list of str
A filename or a list of filenames to trajectory files that can be processed by pyemma.
Both molecular dynamics trajectory files and raw data files (tabulated ASCII or binary) can be loaded.
When molecular dynamics trajectory files are loaded either a featurizer must be specified (for
reading specific quantities such as distances or dihedrals), or a topology file (in that case only
Cartesian coordinates will be read). In the latter case, the resulting feature vectors will have length
3N for each trajectory frame, with N being the number of atoms and (x1, y1, z1, x2, y2, z2, ...) being
the sequence of coordinates in the vector.
Molecular dynamics trajectory files are loaded through mdtraj (http://mdtraj.org/latest/),
and can possess any of the mdtraj-compatible trajectory formats including:
* CHARMM/NAMD (.dcd)
* Gromacs (.xtc)
* Gromacs (.trr)
* AMBER (.binpos)
* AMBER (.netcdf)
* PDB trajectory format (.pdb)
* TINKER (.arc),
* MDTRAJ (.hdf5)
* LAMMPS trajectory format (.lammpstrj)
Raw data can be in the following format:
* tabulated ASCII (.dat, .txt)
* binary python (.npy, .npz)
features : MDFeaturizer, optional, default = None
a featurizer object specifying how molecular dynamics files should be read (e.g. intramolecular distances,
angles, dihedrals, etc).
top : str, optional, default = None
A molecular topology file, e.g. in PDB (.pdb) format
stride : int, optional, default = 1
Load only every stride'th frame. By default, every frame is loaded
chunk_size: int, optional, default = 100
The chunk size at which the input file is being processed.
Returns
-------
data : ndarray or list of ndarray
If a single filename was given as an input (and unless the format is .npz), the return will be a single ndarray
of size (T, d), where T is the number of time steps in the trajectory and d is the number of features
(coordinates, observables). When reading from molecular dynamics data without a specific featurizer,
each feature vector will have size d=3N and will hold the Cartesian coordinates in the sequence
(x1, y1, z1, x2, y2, z2, ...).
If multiple filenames were given, or if the file is a .npz holding multiple arrays, the result is a list
of appropriately shaped arrays
See also
--------
:func:`pyemma.coordinates.pipeline`
if your memory is not big enough, use pipeline to process it in a streaming manner
Examples
--------
>>> from pyemma.coordinates import load
>>> files = ['traj01.xtc', 'traj02.xtc'] # doctest: +SKIP
>>> output = load(files, top='my_structure.pdb') # doctest: +SKIP
"""
if isinstance(trajfiles, basestring) or (
isinstance(trajfiles, (list, tuple))
            and (any(isinstance(item, basestring) for item in trajfiles) or len(trajfiles) == 0)):
reader = _create_file_reader(trajfiles, top, features, chunk_size=chunk_size)
trajs = reader.get_output(stride=stride)
if len(trajs) == 1:
return trajs[0]
else:
return trajs
else:
raise ValueError('unsupported type (%s) of input' % type(trajfiles))
def source(inp, features=None, top=None, chunk_size=100):
r""" Wraps input as data source for pipeline.
Use this function to construct the first stage of a data processing :func:`pipeline`.
Parameters
----------
inp : str (file name) or ndarray or list of strings (file names) or list of ndarrays
The inp file names or input data. Can be given in any of these ways:
1. File name of a single trajectory. It can have any of the molecular dynamics trajectory formats or
raw data formats specified in :py:func:`load`.
2. List of trajectory file names. It can have any of the molecular dynamics trajectory formats or
raw data formats specified in :py:func:`load`.
3. Molecular dynamics trajectory in memory as a numpy array of shape (T, N, 3) with T time steps, N atoms
each having three (x,y,z) spatial coordinates.
4. List of molecular dynamics trajectories in memory, each given as a numpy array of shape (T_i, N, 3),
where trajectory i has T_i time steps and all trajectories have shape (N, 3).
5. Trajectory of some features or order parameters in memory
as a numpy array of shape (T, N) with T time steps and N dimensions.
6. List of trajectories of some features or order parameters in memory, each given as a numpy array
of shape (T_i, N), where trajectory i has T_i time steps and all trajectories have N dimensions.
7. List of NumPy array files (.npy) of shape (T, N). Note these
arrays are not being loaded completely, but mapped into memory (read-only).
8. List of tabulated ASCII files of shape (T, N).
features : MDFeaturizer, optional, default = None
a featurizer object specifying how molecular dynamics files should be read (e.g. intramolecular distances,
angles, dihedrals, etc). This parameter only makes sense if the input comes in the form of molecular dynamics
trajectories or data, and will otherwise create a warning and have no effect
top : str, optional, default = None
A topology file name. This is needed when molecular dynamics trajectories are given and no featurizer is given.
In this case, only the Cartesian coordinates will be read.
chunk_size: int, optional, default = 100
The chunk size at which the input file is being processed.
Returns
-------
reader obj: type depends on input data
1. :class:`FeatureReader <pyemma.coordinates.data.feature_reader.FeatureReader>` for MD-data
2. :class:`NumPyFileReader <pyemma.coordinates.data.numpy_filereader.NumPyFileReader>` for .npy files
3. :class:`PyCSVReader <pyemma.coordinates.data.py_csv_reader.PyCSVReader>` for csv files.
4. :class:`DataInMemory <pyemma.coordinates.data.data_in_memory.DataInMemory>` for already loaded data (e.g NumPy arrays)
See also
--------
:func:`pyemma.coordinates.pipeline`
The data input is the first stage for your pipeline. Add other stages to it and build a pipeline
to analyze big data in streaming mode.
Examples
--------
Create a reader for NumPy files:
>>> import numpy as np
>>> from pyemma.coordinates import source
    >>> reader = source(['001.npy', '002.npy']) # doctest: +SKIP
Create a reader for trajectory files and select some distance as feature:
>>> reader = source(['traj01.xtc', 'traj02.xtc'], top='my_structure.pdb') # doctest: +SKIP
>>> reader.featurizer.add_distances([[0, 1], [5, 6]]) # doctest: +SKIP
>>> calculated_features = reader.get_output() # doctest: +SKIP
    Create a reader for a CSV file:
>>> reader = source('data.csv') # doctest: +SKIP
Create a reader for huge NumPy in-memory arrays to process them in huge chunks
to avoid memory issues:
>>> data = np.random.random(int(1e7))
>>> reader = source(data, chunk_size=5000)
>>> from pyemma.coordinates import cluster_regspace
>>> regspace = cluster_regspace(reader, dmin=0.1)
"""
# CASE 1: input is a string or list of strings
# check: if single string create a one-element list
if isinstance(inp, basestring) or (isinstance(inp, (list, tuple))
                                       and (any(isinstance(item, basestring) for item in inp) or len(inp) == 0)):
reader = _create_file_reader(inp, top, features, chunk_size=chunk_size)
elif isinstance(inp, _np.ndarray) or (isinstance(inp, (list, tuple))
                                          and (any(isinstance(item, _np.ndarray) for item in inp) or len(inp) == 0)):
# CASE 2: input is a (T, N, 3) array or list of (T_i, N, 3) arrays
# check: if single array, create a one-element list
# check: do all arrays have compatible dimensions (*, N, 3)? If not: raise ValueError.
# check: if single array, create a one-element list
# check: do all arrays have compatible dimensions (*, N)? If not: raise ValueError.
# create MemoryReader
reader = _DataInMemory(inp)
else:
raise ValueError('unsupported type (%s) of input' % type(inp))
return reader
def pipeline(stages, run=True, stride=1, chunksize=100):
r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
    If this function takes too long, consider loading data in memory. Alternatively, if the data is too large to be
    loaded into memory, make use of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed by :py:func:`source`.
If a list of pipelining stages are given, the first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given stages. If only an input stage is given,
the run flag has no effect at this time. True also means that the pipeline will be immediately re-parametrized
when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this
could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is
usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
    chunksize : int, optional, default = 100
how many datapoints to process as a batch at one step
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes:
"""
if not isinstance(stages, list):
stages = [stages]
p = _Pipeline(stages, param_stride=stride, chunksize=chunksize)
if run:
p.parametrize()
return p
def discretizer(reader,
transform=None,
cluster=None,
run=True,
stride=1,
chunksize=100):
r""" Specialized pipeline: From trajectories to clustering.
Constructs a pipeline that consists of three stages:
1. an input stage (mandatory)
2. a transformer stage (optional)
3. a clustering stage (mandatory)
    This function is identical to calling :func:`pipeline` with the three stages; it is only meant as guidance
    for the (probably) most common use cases of a pipeline.
Parameters
----------
reader : instance of :class:`pyemma.coordinates.data.reader.ChunkedReader`
The reader instance provides access to the data. If you are working with
MD data, you most likely want to use a FeatureReader.
    transform : instance of :class:`pyemma.coordinates.Transformer`
        an optional transform like PCA/TICA etc.
    cluster : instance of :class:`pyemma.coordinates.AbstractClustering`, optional
a cluster algorithm to assign transformed data to discrete states.
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this
could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is
usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
    chunksize : int, optional, default = 100
how many datapoints to process as a batch at one step
Returns
-------
    pipe : a :class:`Discretizer <pyemma.coordinates.pipelines.Discretizer>` object
A pipeline object that is able to streamline data analysis of large amounts of input data
with limited memory in streaming mode.
Examples
--------
    Construct a discretizer pipeline processing all data
    with a PCA transformation and cluster the principal components
    with regular space clustering:
>>> import numpy as np
>>> from pyemma.coordinates import source, pca, cluster_regspace, discretizer
>>> data = np.random.random((1000, 3))
>>> reader = source(data)
>>> transform = pca(dim=2)
>>> cluster = cluster_regspace(dmin=0.1)
>>> disc = discretizer(reader, transform, cluster)
Finally you want to run the pipeline:
>>> disc.parametrize()
    Access the discrete trajectories and save them to files:
>>> disc.dtrajs # doctest: +ELLIPSIS
[array([...
This will store the discrete trajectory to "traj01.dtraj":
>>> disc.save_dtrajs() # doctest: +SKIP
"""
if cluster is None:
_logger.warning('You did not specify a cluster algorithm.'
' Defaulting to kmeans(k=100)')
cluster = _KmeansClustering(n_clusters=100)
disc = _Discretizer(reader, transform, cluster, param_stride=stride)
if run:
disc.parametrize()
return disc
@deprecated('Use either pyemma.coordinates.source() or pyemma.coordinates.load()')
def feature_reader(trajfiles, topfile):
r"""*Deprecated.* Constructs a molecular feature reader.
    This function is deprecated. Use :func:`source` instead.
Parameters
----------
trajfiles : list of str
list of filenames to read sequentially
topfile : str
path to a topology file (eg. pdb)
Returns
-------
obj : :class:`io.FeatureReader`
Notes
-----
To select features refer to the documentation of the :class:`io.featurizer.MDFeaturizer`
See also
--------
pyemma.coordinates.data.FeatureReader
Reader object
"""
return _FeatureReader(trajfiles, topfile)
@deprecated("Please use pyemma.coordinates.load()")
def memory_reader(data):
r"""*Deprecated.* Constructs a reader from an in-memory ndarray.
    This function is deprecated. Use :func:`source` instead.
Parameters
----------
data : (N,d) ndarray
array with N frames of d dimensions
Returns
-------
obj : :class:`DataInMemory`
See also
--------
pyemma.coordinates.data.DataInMemory
Reader object
"""
return _DataInMemory(data)
def save_traj(traj_inp, indexes, outfile, top=None, stride = 1, chunksize=1000, verbose=False):
r""" Saves a sequence of frames as a single trajectory.
Extracts the specified sequence of time/trajectory indexes from traj_inp
and saves it to one single molecular dynamics trajectory file. The output format will be determined
by the outfile name.
Parameters
----------
traj_inp :
traj_inp can be of two types.
1. a python list of strings containing the filenames associated with the indices in
:py:obj:`indexes`. With this type of input, a :py:obj:`topfile` is mandatory.
2. a :py:func:`pyemma.coordinates.data.feature_reader.FeatureReader` object containing the filename
list in :py:obj:`traj_inp.trajfiles`. Please use :py:func:`pyemma.coordinates.source` to construct it.
           With this type of input, the input :py:obj:`topfile` will be ignored and :py:obj:`traj_inp.topfile` will
           be used instead.
indexes : ndarray(T, 2) or list of ndarray(T_i, 2)
A (T x 2) array for writing a trajectory of T time steps. Each row contains two indexes (i, t), where
i is the index of the trajectory from the input and t is the index of the time step within the trajectory.
If a list of index arrays are given, these will be simply concatenated, i.e. they will be written
subsequently in the same trajectory file.
outfile : str.
The name of the output file. Its extension will determine the file type written. Example: "out.dcd"
If set to None, the trajectory object is returned to memory
top : str, mdtraj.Trajectory, or mdtraj.Topology
The topology needed to read the files in the list :py:obj:`traj_inp`. If :py:obj:`traj_inp` is not a list,
this parameter is ignored.
stride : integer, default is 1
This parameter informs :py:func:`save_traj` about the stride used in :py:obj:`indexes`. Typically, :py:obj:`indexes`
contains frame-indexes that match exactly the frames of the files contained in :py:obj:`traj_inp.trajfiles`.
However, in certain situations, that might not be the case. Examples are cases in which a stride value != 1
was used when reading/featurizing/transforming/discretizing the files contained in :py:obj:`traj_inp.trajfiles`.
chunksize : int. Default 1000.
The chunksize for reading input trajectory files. If :py:obj:`traj_inp` is a
:py:func:`pyemma.coordinates.data.feature_reader.FeatureReader` object, this input variable will be ignored and
:py:obj:`traj_inp.chunksize` will be used instead.
verbose : boolean, default is False
        Verbose output while looking for :py:obj:`indexes` in the :py:obj:`traj_inp.trajfiles`
Returns
-------
traj : :py:obj:`mdtraj.Trajectory` object
Will only return this object if :py:obj:`outfile` is None
"""
# Determine the type of input and extract necessary parameters
if isinstance(traj_inp, _FeatureReader):
trajfiles = traj_inp.trajfiles
top = traj_inp.topfile
chunksize = traj_inp.chunksize
else:
# Do we have what we need?
        assert isinstance(traj_inp, list), "traj_inp has to be of type list, not %s" % type(traj_inp)
assert isinstance(top,(str,_Topology, _Trajectory)), "traj_inp cannot be a list of files without an input " \
"top of type str (eg filename.pdb), mdtraj.Trajectory or mdtraj.Topology. " \
"Got type %s instead"%type(top)
trajfiles = traj_inp
# Enforce the input topology to actually be an md.Topology object
top = _enforce_top(top)
    # Convert to index (T,2) array if passed a list or a list of arrays
indexes = _np.vstack(indexes)
# Check that we've been given enough filenames
assert (len(trajfiles) >= indexes[:,0].max()), "traj_inp contains %u trajfiles, " \
"but indexes will ask for file nr. %u"%(len(trajfiles), indexes[0].max())
# Instantiate a list of iterables that will contain mdtraj trajectory objects
trajectory_iterator_list = []
# Cycle only over files that are actually mentioned in "indexes"
file_idxs, file_pos = _np.unique(indexes[:, 0], return_inverse=True)
for ii, ff in enumerate(file_idxs):
# Slice the indexes array (frame column) where file ff was mentioned
frames = indexes[file_pos == ii, 1]
# Store the trajectory object that comes out of _frames_from_file
# directly as an iterator in trajectory_iterator_list
trajectory_iterator_list.append(_itertools.islice(_frames_from_file(trajfiles[ff],
top,
frames, chunksize=chunksize,
verbose=verbose, stride = stride,
copy_not_join=True),
None)
)
# Prepare the trajectory object
traj = _preallocate_empty_trajectory(top, indexes.shape[0])
# Iterate directly over the index of files and pick the trajectory that you need from the iterator list
for ii, traj_idx in enumerate(file_pos):
# Append the trajectory from the respective list of iterators
# and advance that iterator
traj = _copy_traj_attributes(traj, trajectory_iterator_list[traj_idx].next(), ii)
# Return to memory as an mdtraj trajectory object
if outfile is None:
return traj
# or to disk as a molecular trajectory file
else:
traj.save(outfile)
_logger.info("Created file %s" % outfile)
def save_trajs(traj_inp, indexes, prefix = 'set_', fmt = None, outfiles = None,
inmemory = False, stride = 1, verbose = False):
r""" Saves sequences of frames as multiple trajectories.
Extracts a number of specified sequences of time/trajectory indexes from the input loader
and saves them in a set of molecular dynamics trajectories.
    The output filenames are obtained as prefix + str(n) + .fmt, where n counts the output
    trajectories and the extension is either set by the user or determined from the input.
Example: When the input is in dcd format, and indexes is a list of length 3, the output will
by default go to files "set_1.dcd", "set_2.dcd", "set_3.dcd". If you want files to be stored
    in a specific subfolder, simply specify the relative path in the prefix, e.g. prefix='~/macrostates/pcca\_'
Parameters
----------
traj_inp : :py:class:`pyemma.coordinates.data.feature_reader.FeatureReader`
        A data source object; please use :py:func:`pyemma.coordinates.source` to construct it.
indexes : list of ndarray(T_i, 2)
        A list of N arrays, each of size (T_i x 2), for writing N trajectories of T_i time steps each.
Each row contains two indexes (i, t), where i is the index of the trajectory from the input
and t is the index of the time step within the trajectory.
prefix : str, optional, default = `set_`
output filename prefix. Can include an absolute or relative path name.
fmt : str, optional, default = None
        Output file format. By default, the file extension and format will be determined from the input. If a
        different format is desired, specify the corresponding file extension here without a dot, e.g. "dcd" or "xtc".
outfiles : list of str, optional, default = None
A list of output filenames. When given, this will override the settings of prefix and fmt, and output
will be written to these files.
inmemory : Boolean, default = False (untested for large files)
        Instead of internally calling save_traj for every (T_i,2) array in "indexes", only one call is made. Internally,
this generates a potentially large molecular trajectory object in memory that is subsequently sliced into the
files of "outfiles". Should be faster for large "indexes" arrays and large files, though it is quite memory
intensive. The optimal situation is to avoid streaming two times through a huge file for "indexes" of type:
indexes = [[1 4000000],[1 4000001]]
stride : integer, default is 1
This parameter informs :py:func:`save_trajs` about the stride used in the indexes variable. Typically, the variable
indexes contains frame-indexes that match exactly the frames of the files contained in traj_inp.trajfiles.
However, in certain situations, that might not be the case. Examples of these situations are cases in
which stride value != 1 was used when reading/featurizing/transforming/discretizing the files contained in
traj_inp.trajfiles.
verbose : boolean, default is False
Verbose output while looking for "indexes" in the "traj_inp.trajfiles"
Returns
-------
outfiles : list of str
The list of absolute paths that the output files have been written to.
"""
# Make sure indexes is iterable
assert _types.is_iterable(indexes), "Indexes must be an iterable of matrices."
# only if 2d-array, convert into a list
if isinstance(indexes, _np.ndarray):
if indexes.ndim == 2:
indexes = [indexes]
    # Make sure the elements of that list are arrays, and that they are shaped properly
for i_indexes in indexes:
assert isinstance(i_indexes, _np.ndarray), "The elements in the 'indexes' variable must be numpy.ndarrays"
assert i_indexes.ndim == 2, \
"The elements in the 'indexes' variable must have ndim = 2, and not %u" % i_indexes.ndim
assert i_indexes.shape[1] == 2, \
"The elements in the 'indexes' variable must be of shape (T_i,2), and not (%u,%u)" % i_indexes.shape
# Determine output format of the molecular trajectory file
if fmt is None:
import os
_, fmt = os.path.splitext(traj_inp.trajfiles[0])
else:
fmt = '.' + fmt
# Prepare the list of outfiles before the loop
if outfiles is None:
outfiles = []
for ii in xrange(len(indexes)):
outfiles.append(prefix + '%06u' % ii + fmt)
    # Check that we have the same number of outfiles as (T, 2)-indexes arrays
if len(indexes) != len(outfiles):
raise Exception('len(indexes) (%s) does not match len(outfiles) (%s)' % (len(indexes), len(outfiles)))
# This implementation looks for "i_indexes" separately, and thus one traj_inp.trajfile
# might be accessed more than once (less memory intensive)
if not inmemory:
for i_indexes, outfile in _itertools.izip(indexes, outfiles):
# TODO: use **kwargs to parse to save_traj
save_traj(traj_inp, i_indexes, outfile, stride = stride, verbose=verbose)
# This implementation is "one file - one pass" but might temporally create huge memory objects
else:
traj = save_traj(traj_inp, indexes, outfile=None, stride = stride, verbose=verbose)
i_idx = 0
for i_indexes, outfile in _itertools.izip(indexes, outfiles):
# Create indices for slicing the mdtraj trajectory object
f_idx = i_idx + len(i_indexes)
# print i_idx, f_idx
traj[i_idx:f_idx].save(outfile)
_logger.info("Created file %s" % outfile)
# update the initial frame index
i_idx = f_idx
return outfiles
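# A minimal usage sketch for save_trajs (hypothetical file names): write two
# frame selections to two separate files using the default 'set_' prefix.
def _example_save_trajs():  # illustrative sketch only, not part of the public API
    reader = source(['traj01.xtc', 'traj02.xtc'], top='my_structure.pdb')
    # one (T_i, 2) index array per output trajectory
    indexes = [_np.array([[0, 0], [0, 1]]), _np.array([[1, 0], [1, 1]])]
    return save_trajs(reader, indexes)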
# =========================================================================
#
# TRANSFORMATION ALGORITHMS
#
# =========================================================================
def _get_input_stage(previous_stage):
# this is a pipelining stage, so let's parametrize from it
if isinstance(previous_stage, _Transformer):
inputstage = previous_stage
# second option: data is array or list of arrays
else:
data = _types.ensure_traj_list(previous_stage)
inputstage = _DataInMemory(data)
return inputstage
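# Example: _get_input_stage(_np.random.random((100, 3))) wraps the raw array in a
# DataInMemory stage, whereas an existing reader or Transformer instance is passed
# through unchanged so that the next stage can be parametrized from it directly.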
def _param_stage(previous_stage, this_stage, stride=1):
r""" Parametrizes the given pipelining stage if a valid source is given.
Parameters
----------
    previous_stage : one of the following: None, Transformer (subclass), ndarray, list of ndarrays
        data source from which this transformer will be parametrized. If None,
        there is no input data and the stage will be returned without any other action.
    this_stage : the transformer object to be parametrized given the source input.
"""
# no input given - nothing to do
if previous_stage is None:
return this_stage
inputstage = _get_input_stage(previous_stage)
# parametrize transformer
this_stage.data_producer = inputstage
this_stage.chunksize = inputstage.chunksize
this_stage.parametrize(stride=stride)
return this_stage
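# A minimal sketch of the wiring performed by _param_stage, using the same
# constructor call as pca() below (in-memory data, default stride):
#
#     stage = _PCA(dim=2, var_cutoff=1.0)
#     stage = _param_stage(_np.random.random((1000, 3)), stage, stride=1)
#     projected = stage.get_output()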
def pca(data=None, dim=2, var_cutoff=1.0, stride=1, mean=None):
r""" Principal Component Analysis (PCA).
PCA is a linear transformation method that finds coordinates of maximal variance.
A linear projection onto the principal components thus makes a minimal error in terms
of variation in the data. Note, however, that this method is not optimal
for Markov model construction because for that purpose the main objective is to
preserve the slow processes which can sometimes be associated with small variance.
It estimates a PCA transformation from data. When input data is given as an
argument, the estimation will be carried out right away, and the resulting
object can be used to obtain eigenvalues, eigenvectors or project input data
onto the principal components. If data is not given, this object is an
empty estimator and can be put into a :func:`pipeline` in order to use PCA
in streaming mode.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function
data array or list of data arrays. T or T_i are the number of time steps in a
trajectory. When data is given, the PCA is immediately parametrized by estimating
the covariance matrix and computing its eigenvectors.
    dim : int, optional, default 2
the number of dimensions (principal components) to project onto. A call to the
:func:`map <pyemma.coordinates.transform.PCA.map>` function reduces the d-dimensional
input to only dim dimensions such that the data preserves the maximum possible variance
amongst dim-dimensional linear projections.
-1 means all numerically available dimensions will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 1
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
exceeds the fraction subspace_variance. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
you can parametrize at a long stride, and still map all frames through the transformer.
mean : ndarray, optional, default None
Optionally pass pre-calculated means to avoid their re-computation.
The shape has to match the input dimension.
Returns
-------
pca : a :class:`PCA<pyemma.coordinates.transform.PCA>` transformation object
        Object for principal component analysis (PCA).
        It contains PCA eigenvalues and eigenvectors, and the projection of the input data onto the dominant principal components.
Notes
-----
Given a sequence of multivariate data :math:`X_t`,
computes the mean-free covariance matrix.
.. math:: C = (X - \mu)^T (X - \mu)
and solves the eigenvalue problem
.. math:: C r_i = \sigma_i r_i,
where :math:`r_i` are the principal components and :math:`\sigma_i` are
their respective variances.
When used as a dimension reduction method, the input data is projected onto
the dominant principal components.
See `Wiki page <http://en.wikipedia.org/wiki/Principal_component_analysis>`_ for more theory and references.
Examples
--------
Create some input data:
>>> import numpy as np
>>> from pyemma.coordinates import pca
>>> data = np.ones((1000, 2))
>>> data[0, -1] = 0
Project all input data on the first principal component:
>>> pca_obj = pca(data, dim=1)
>>> pca_obj.get_output() # doctest: +ELLIPSIS
[array([[-0.99900001],
[ 0.001 ],
[ 0.001 ],...
.. autoclass:: pyemma.coordinates.transform.pca.PCA
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.transform.pca.PCA
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.transform.pca.PCA
:attributes:
See also
--------
:class:`PCA <pyemma.coordinates.transform.PCA>` : pca object
:func:`tica <pyemma.coordinates.tica>` : for time-lagged independent component analysis
References
----------
.. [1] Hotelling, H. 1933.
Analysis of a complex of statistical variables into principal components.
J. Edu. Psych. 24, 417-441 and 498-520.
"""
if mean is not None:
data = _get_input_stage(data)
indim = data.dimension()
mean = _types.ensure_ndarray(mean, shape=(indim,), dtype=_np.float)
    res = _PCA(dim=dim, var_cutoff=var_cutoff, mean=mean)
return _param_stage(data, res, stride=stride)
def tica(data=None, lag=10, dim=-1, var_cutoff=1.0, kinetic_map=False, stride=1,
force_eigenvalues_le_one=False, mean=None):
r""" Time-lagged independent component analysis (TICA).
TICA is a linear transformation method. In contrast to PCA, which finds
coordinates of maximal variance, TICA finds coordinates of maximal autocorrelation
at the given lag time. Therefore, TICA is useful in order to find the *slow* components
in a dataset and thus an excellent choice to transform molecular dynamics
data before clustering data for the construction of a Markov model.
When the input data is the result of a Markov process (such as thermostatted
molecular dynamics), TICA finds in fact an approximation to the eigenfunctions and
eigenvalues of the underlying Markov operator [1]_.
It estimates a TICA transformation from *data*. When input data is given as an
argument, the estimation will be carried out straight away, and the resulting
object can be used to obtain eigenvalues, eigenvectors or project input data
onto the slowest TICA components. If no data is given, this object is an
empty estimator and can be put into a :func:`pipeline` in order to use TICA
in the streaming mode.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function
array with the data, if available. When given, the TICA transformation
is immediately computed and can be used to transform data.
lag : int, optional, default = 10
the lag time, in multiples of the input time step
dim : int, optional, default -1
the number of dimensions (independent components) to project onto. A call to the
:func:`map <pyemma.coordinates.transform.TICA.map>` function reduces the d-dimensional
input to only dim dimensions such that the data preserves the maximum possible autocorrelation
amongst dim-dimensional linear projections.
-1 means all numerically available dimensions will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 1
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
        exceeds the fraction var_cutoff. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default False
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
you can parametrize at a long stride, and still map all frames through the transformer.
force_eigenvalues_le_one : boolean
Compute covariance matrix and time-lagged covariance matrix such
that the generalized eigenvalues are always guaranteed to be <= 1.
mean : ndarray, optional, default None
Optionally pass pre-calculated means to avoid their re-computation.
The shape has to match the input dimension.
Returns
-------
tica : a :class:`TICA <pyemma.coordinates.transform.TICA>` transformation object
Object for time-lagged independent component (TICA) analysis.
        It contains TICA eigenvalues and eigenvectors, and the projection of the input data onto the dominant TICA components.
Notes
-----
Given a sequence of multivariate data :math:`X_t`, it computes the mean-free
covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T (X_t + \tau - \mu)
and solves the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i r_i,
where :math:`r_i` are the independent components and :math:`\lambda_i` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math::
t_i = -\frac{\tau}{\ln |\lambda_i|}.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
TICA was originally introduced for signal processing in [2]_. It was introduced
to molecular dynamics and as a method for the construction of Markov models in
[1]_ and [3]_. It was shown in [1]_ that when applied to molecular dynamics data,
TICA is an approximation to the eigenvalues and eigenvectors of the true underlying
dynamics.
Examples
--------
Invoke TICA transformation with a given lag time and output dimension:
>>> import numpy as np
>>> from pyemma.coordinates import tica
>>> data = np.random.random((100,3))
>>> projected_data = tica(data, lag=2, dim=1).get_output()[0]
    For a brief explanation of why TICA outperforms PCA at extracting a good reaction
    coordinate, have a look `here
<http://docs.markovmodel.org/lecture_tica.html#Example:-TICA-versus-PCA-in-a-stretched-double-well-potential>`_.
.. autoclass:: pyemma.coordinates.transform.tica.TICA
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.transform.tica.TICA
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.transform.tica.TICA
:attributes:
See also
--------
:class:`TICA <pyemma.coordinates.transform.TICA>` : tica object
:func:`pca <pyemma.coordinates.pca>` : for principal component analysis
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [3] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [4] Noe, F. and C. Clementi. 2015.
Kinetic distance and kinetic maps from molecular dynamics simulation
(in preparation).
"""
if mean is not None:
data = _get_input_stage(data)
indim = data.dimension()
mean = _types.ensure_ndarray(mean, shape=(indim,), dtype=_np.float)
res = _TICA(lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map,
force_eigenvalues_le_one=force_eigenvalues_le_one, mean=mean)
return _param_stage(data, res, stride=stride)
# =========================================================================
#
# CLUSTERING ALGORITHMS
#
# =========================================================================
@deprecated("Please use pyemma.coordinates.cluster_kmeans()")
def kmeans(data=None, k=100, max_iter=1000, stride=1):
return cluster_kmeans(data, k, max_iter, stride=stride)
def cluster_kmeans(data=None, k=100, max_iter=10, stride=1, metric='euclidean', init_strategy='kmeans++'):
r"""k-means clustering
If data is given, it performs a k-means clustering and then assigns the data using a Voronoi discretization.
It returns a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` object
that can be used to extract the discretized
data sequences, or to assign other data points to the same partition. If data is not given, an
empty :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` will be created that
still needs to be parametrized, e.g. in a :func:`pipeline`.
.. seealso:: **Theoretical background**: `Wiki page <http://en.wikipedia.org/wiki/K-means_clustering>`_
Parameters
----------
data: ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function
input data, if available in memory
    k: int
        the number of cluster centers
    max_iter : int, optional, default = 10
        maximum number of k-means iterations before the algorithm stops
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
you can parametrize at a long stride, and still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
init_strategy : str
determines if the initial cluster centers are chosen according to the kmeans++-algorithm
or uniformly distributed
Returns
-------
kmeans : a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` clustering object
Object for kmeans clustering.
It holds discrete trajectories and cluster center information.
Examples
--------
>>> import numpy as np
>>> import pyemma.coordinates as coor
>>> traj_data = [np.random.random((100, 3)), np.random.random((100,3))]
>>> cluster_obj = coor.cluster_kmeans(traj_data, k=20, stride=1)
>>> cluster_obj.get_output() # doctest: +ELLIPSIS
[array([...
.. autoclass:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:attributes:
"""
res = _KmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, init_strategy=init_strategy)
return _param_stage(data, res, stride=stride)
@deprecated("Please use pyemma.coordinates.cluster_uniform_time()")
def uniform_time(data=None, k=100, stride=1):
return cluster_uniform_time(data, k, stride=stride)
def cluster_uniform_time(data=None, k=100, stride=1, metric='euclidean'):
r"""Uniform time clustering
If given data, performs a clustering that selects data points uniformly in time and then assigns the data
using a Voronoi discretization. Returns a
:class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` object
that can be used to extract the discretized data sequences, or to assign other data points to the same partition.
If data is not given, an empty
:class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` will be created that
still needs to be parametrized, e.g. in a :func:`pipeline`.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function
input data, if available in memory
k : int
the number of cluster centers
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
        you can parametrize at a long stride, and still map all frames through the transformer.
    metric : str
        metric to use during clustering ('euclidean', 'minRMSD')
Returns
-------
uniformTime : a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` clustering object
Object for uniform time clustering.
It holds discrete trajectories and cluster center information.
.. autoclass:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering
:attributes:
"""
res = _UniformTimeClustering(k, metric=metric)
    return _param_stage(data, res, stride=stride)
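# A minimal usage sketch for cluster_uniform_time with in-memory data:
def _example_cluster_uniform_time():  # illustrative sketch only, not part of the public API
    data = _np.random.random((1000, 3))
    clustering = cluster_uniform_time(data, k=10)
    # discrete trajectories: one integer array of cluster indices per input trajectory
    return clustering.dtrajs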
@deprecated("Please use pyemma.coordinates.cluster_regspace()")
def regspace(data=None, dmin=-1, max_centers=1000, stride=1):
return cluster_regspace(data, dmin, max_centers, stride=stride)
def cluster_regspace(data=None, dmin=-1, max_centers=1000, stride=1, metric='euclidean'):
r"""Regular space clustering
If given data, it performs a regular space clustering [1]_ and returns a
:class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` object that
can be used to extract the discretized data sequences, or to assign other data points to the same partition.
If data is not given, an empty
:class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` will be created
that still needs to be parametrized, e.g. in a :func:`pipeline`.
Regular space clustering is very similar to Hartigan's leader algorithm [2]_. It consists of two passes through
the data. Initially, the first data point is added to the list of centers. For every subsequent data point, if
it has a greater distance than dmin from every center, it also becomes a center. In the second pass, a Voronoi
discretization with the computed centers is used to partition the data.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function
input data, if available in memory
dmin : float
the minimal distance between cluster centers
max_centers : int (optional), default=1000
If max_centers is reached, the algorithm will stop to find more centers,
but it is possible that parts of the state space are not properly discretized. This will generate a
warning. If that happens, it is suggested to increase dmin such that the number of centers stays below
max_centers.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
you can parametrize at a long stride, and still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
Returns
-------
regSpace : a :class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` clustering object
Object for regular space clustering.
It holds discrete trajectories and cluster center information.
.. autoclass:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:attributes:
References
----------
.. [1] Prinz J-H, Wu H, Sarich M, Keller B, Senne M, Held M, Chodera JD, Schuette Ch and Noe F. 2011.
Markov models of molecular kinetics: Generation and Validation.
J. Chem. Phys. 134, 174105.
.. [2] Hartigan J. Clustering algorithms.
New York: Wiley; 1975.
"""
if dmin == -1:
raise ValueError("provide a minimum distance for clustering, e.g. 2.0")
res = _RegularSpaceClustering(dmin, max_centers, metric=metric)
return _param_stage(data, res, stride=stride)
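# A minimal usage sketch for cluster_regspace with in-memory data; dmin is the
# only mandatory parameter:
def _example_cluster_regspace():  # illustrative sketch only, not part of the public API
    data = _np.random.random((1000, 3))
    clustering = cluster_regspace(data, dmin=0.3)
    # cluster centers found during the first pass over the data
    return clustering.clustercenters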
@deprecated("Please use pyemma.coordinates.assign_to_centers()")
def assign_centers(data=None, centers=None, stride=1):
return assign_to_centers(data, centers, stride=stride)
def assign_to_centers(data=None, centers=None, stride=1, return_dtrajs=True,
metric='euclidean'):
r"""Assigns data to the nearest cluster centers
Creates a Voronoi partition with the given cluster centers. If given trajectories as data, this function
will by default discretize the trajectories and return discrete trajectories of corresponding lengths.
Otherwise, an assignment object will be returned that can be used to assign data later or can serve
as a pipeline stage.
Parameters
----------
data : ndarray or list of arrays or reader created by source function
data to be assigned
centers : path to file or ndarray or a reader created by source function
cluster centers to use in assignment of data
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned object is independent, so
you can parametrize at a long stride, and still map all frames through the transformer.
return_dtrajs : bool, optional, default = True
If True, it will return the discretized trajectories obtained from assigning the coordinates in the data
input. This will only have effect if data is given. When data is not given or return_dtrajs is False,
        the :class:`AssignCenters <pyemma.coordinates.clustering.AssignCenters>` object will be returned.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
Returns
-------
assignment : list of integer arrays or an :class:`AssignCenters <pyemma.coordinates.clustering.AssignCenters>` object
assigned data
Examples
--------
    Assign data points to a given set of cluster centers:
    >>> import numpy as np
    >>> from pyemma.coordinates import assign_to_centers
Generate some random data and choose 10 random centers:
>>> data = np.random.random((100, 3))
>>> cluster_centers = data[np.random.randint(0, 99, size=10)]
>>> dtrajs = assign_to_centers(data, cluster_centers)
>>> print dtrajs # doctest: +ELLIPSIS
[array([...
"""
if centers is None:
raise ValueError('You have to provide centers in form of a filename'
' or NumPy array or a reader created by source function')
res = _AssignCenters(centers, metric=metric)
parametrized_stage = _param_stage(data, res, stride=stride)
if return_dtrajs and data is not None:
return parametrized_stage.dtrajs
return parametrized_stage
|
arokem/PyEMMA
|
pyemma/coordinates/api.py
|
Python
|
bsd-2-clause
| 59,936
|
[
"Amber",
"CHARMM",
"Gromacs",
"LAMMPS",
"MDTraj",
"NAMD",
"NetCDF",
"TINKER"
] |
6e9bc450bd44b6e8624686b268fdf207f56c9e38fbd03b210495afd094cbf29b
|
#!/usr/bin/env python
# coding: utf-8
"""
A convenience script engine to read Gaussian output in a directory tree.
"""
from __future__ import division, print_function
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 9, 2012"
import argparse
import os
import logging
import re
from pymatgen.util.string_utils import str_aligned
from pymatgen.apps.borg.hive import GaussianToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
import multiprocessing
save_file = "gau_data.gz"
def get_energies(rootdir, reanalyze, verbose, pretty):
if verbose:
FORMAT = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
drone = GaussianToComputedEntryDrone(inc_structure=True,
parameters=['filename'])
ncpus = multiprocessing.cpu_count()
logging.info('Detected {} cpus'.format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(save_file) and not reanalyze:
        msg = ('Using previously assimilated data from {}. '
               'Use -f to force re-analysis').format(save_file)
queen.load_data(save_file)
else:
queen.parallel_assimilate(rootdir)
msg = 'Results saved to {} for faster reloading.'.format(save_file)
queen.save_data(save_file)
entries = queen.get_data()
entries = sorted(entries, key=lambda x: x.parameters['filename'])
all_data = [(e.parameters['filename'].replace("./", ""),
re.sub("\s+", "", e.composition.formula),
"{}".format(e.parameters['charge']),
"{}".format(e.parameters['spin_mult']),
"{:.5f}".format(e.energy), "{:.5f}".format(e.energy_per_atom),
) for e in entries]
headers = ("Directory", "Formula", "Charge", "Spin Mult.", "Energy",
"E/Atom")
if pretty:
from prettytable import PrettyTable
t = PrettyTable(headers)
t.set_field_align("Directory", "l")
for d in all_data:
t.add_row(d)
print(t)
else:
print(str_aligned(all_data, headers))
print(msg)
desc = '''
Convenient Gaussian run analyzer which can recursively go into a directory
to search results.
Author: Shyue Ping Ong
Version: 1.0
Last updated: Jul 6 2012'''
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('directories', metavar='dir', default='.', type=str,
nargs='*', help='directory to process')
parser.add_argument('-v', '--verbose', dest="verbose",
action='store_const', const=True,
help='verbose mode. Provides detailed output ' +
'on progress.')
parser.add_argument('-p', '--pretty', dest="pretty", action='store_const',
const=True,
help='pretty mode. Uses prettytable to format ' +
'output. Must have prettytable module installed.')
parser.add_argument('-f', '--force', dest="reanalyze",
action='store_const',
const=True,
help='force reanalysis. Typically, gaussian_analyzer' +
                             ' will just reuse a gau_data.gz if ' +
'present. This forces the analyzer to reanalyze.')
args = parser.parse_args()
for d in args.directories:
get_energies(d, args.reanalyze, args.verbose, args.pretty)
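# Typical invocations (sketch; the directory name is hypothetical):
#   python gaussian_analyzer.py runs/        # analyze a tree of Gaussian runs
#   python gaussian_analyzer.py -p runs/     # pretty-print with prettytable
#   python gaussian_analyzer.py -f runs/     # force re-analysis, ignoring gau_data.gz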
|
rousseab/pymatgen
|
scripts/gaussian_analyzer.py
|
Python
|
mit
| 3,689
|
[
"Gaussian",
"pymatgen"
] |
889c8f801593b8f281be4076cfbbe1b9fe9e8c6342708294a7e4c412adb19539
|
'''
diacamma.condominium tests package
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from shutil import rmtree
from lucterios.framework.test import LucteriosTest
from lucterios.framework.filetools import get_user_dir
from lucterios.framework.model_fields import LucteriosScheduler
from lucterios.CORE.models import Parameter
from lucterios.CORE.parameters import Params
from lucterios.mailing.test_tools import decode_b64
from lucterios.mailing.models import Message
from diacamma.accounting.models import ChartsAccount, FiscalYear, Budget
from diacamma.accounting.views_entries import EntryAccountList
from diacamma.accounting.test_tools import initial_thirds_fr, default_compta_fr, default_costaccounting, initial_thirds_be, default_compta_be
from diacamma.payoff.views import PayoffAddModify, PayableEmail
from diacamma.payoff.test_tools import default_bankaccount_fr, default_bankaccount_be, check_pdfreport
from diacamma.condominium.views_callfunds import CallFundsList, CallFundsAddModify, CallFundsDel, \
CallFundsShow, CallDetailAddModify, CallFundsTransition, CallFundsPrint, CallFundsAddCurrent, CallFundsPayableEmail
from diacamma.condominium.test_tools import default_setowner_fr, old_accounting, default_setowner_be, add_test_callfunds
from diacamma.condominium.models import Set, CallFunds
from diacamma.condominium.views import PaymentVentilatePay, OwnerShow
class CallFundsTest(LucteriosTest):
def setUp(self):
initial_thirds_fr()
LucteriosTest.setUp(self)
default_compta_fr(with12=False)
default_costaccounting()
default_bankaccount_fr()
default_setowner_fr()
rmtree(get_user_dir(), True)
def test_create(self):
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total"}, 0)
self.assert_json_equal('', '#callfunds/headers/@4/@0', 'total')
self.assert_json_equal('', '#callfunds/headers/@4/@2', 'C2EUR')
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsAddModify')
self.assert_count_equal('', 4)
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total", "supporting.total_rest_topay": "reste à payer"}, 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total"}, 1)
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'CONFIRME': 'YES', "callfunds": 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
def test_add(self):
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_count_equal('', 8)
self.assertEqual(len(self.json_actions), 2)
self.assert_grid_equal('calldetail', {"type_call_ex": "type d'appel", "set": "catégorie de charges", "designation": "désignation", "set.total_part": "somme des tantièmes", "price": "montant", }, 0)
self.assert_json_equal('', '#calldetail/headers/@4/@0', 'price')
self.assert_json_equal('', '#calldetail/headers/@4/@2', 'C2EUR')
self.assert_count_equal('#calldetail/actions', 3)
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_count_equal('', 5)
self.assert_json_equal('SELECT', 'set', '1')
self.assert_json_equal('FLOAT', 'price', '250.00')
self.assert_select_equal('type_call', {0: 'charge courante', 1: 'charge exceptionnelle', 2: 'avance de fonds', 4: 'fonds travaux'})
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 1, 'type_call': 0, 'set': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '2')
self.assert_json_equal('FLOAT', 'price', '25.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 1, 'type_call': 1, 'set': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '3')
self.assert_json_equal('FLOAT', 'price', '500.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 1, 'price': '340.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 2, 'price': '25.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 1, 'set': 3, 'price': '100.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assertEqual(len(self.json_actions), 3)
self.assert_count_equal('calldetail', 3)
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@0/set', '[1] AAA')
self.assert_json_equal('', 'calldetail/@0/price', 340.00)
self.assert_json_equal('', 'calldetail/@1/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@1/set', '[2] BBB')
self.assert_json_equal('', 'calldetail/@1/price', 25.00)
self.assert_json_equal('', 'calldetail/@2/type_call_ex', 'charge exceptionnelle')
self.assert_json_equal('', 'calldetail/@2/set', '[3] CCC')
self.assert_json_equal('', 'calldetail/@2/price', 100.00)
self.assert_json_equal('LABELFORM', 'total', 465.00)
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_count_equal('', 5)
self.assert_json_equal('SELECT', 'set', '1')
self.assert_json_equal('FLOAT', 'price', '220.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 2, 'type_call': 1, 'set': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '3')
self.assert_json_equal('FLOAT', 'price', '400.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 2, 'type_call': 2, 'set': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '1')
self.assert_json_equal('FLOAT', 'price', '0.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 2, 'type_call': 4, 'set': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '1')
self.assert_json_equal('FLOAT', 'price', '50.00')
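
    # Automatic generation of the default current calls of funds: four quarterly calls.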
def test_add_default_current(self):
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 4)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@1/date', "2015-04-01")
self.assert_json_equal('', 'callfunds/@2/date', "2015-07-01")
self.assert_json_equal('', 'callfunds/@3/date', "2015-10-01")
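        # Each quarterly call amounts to 275.00, matching the 250.00 + 25.00 default
        # current amounts of sets 1 and 2 seen in test_add.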
self.assert_json_equal('', 'callfunds/@0/total', 275.00)
self.assert_json_equal('', 'callfunds/@1/total', 275.00)
self.assert_json_equal('', 'callfunds/@2/total', 275.00)
self.assert_json_equal('', 'callfunds/@3/total', 275.00)
self.assertEqual(len(self.json_actions), 1)
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'callfunds': '2;4'}, False)
self.assert_observer('core.dialogbox', 'diacamma.condominium', 'callFundsDel')
self.assert_json_equal('', 'text', "Voulez-vous supprimer ces 2 appels de fonds?")
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'callfunds': '2;4', 'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 2)
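
    # Same generation with 'condominium-mode-current-callfunds' switched to monthly:
    # twelve calls of 91.66 instead of four quarterly calls.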
def test_add_default_current_monthly(self):
Parameter.change_value('condominium-mode-current-callfunds', 1)
Params.clear()
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 12)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@1/date', "2015-02-01")
self.assert_json_equal('', 'callfunds/@2/date', "2015-03-01")
self.assert_json_equal('', 'callfunds/@3/date', "2015-04-01")
self.assert_json_equal('', 'callfunds/@4/date', "2015-05-01")
self.assert_json_equal('', 'callfunds/@5/date', "2015-06-01")
self.assert_json_equal('', 'callfunds/@6/date', "2015-07-01")
self.assert_json_equal('', 'callfunds/@7/date', "2015-08-01")
self.assert_json_equal('', 'callfunds/@8/date', "2015-09-01")
self.assert_json_equal('', 'callfunds/@9/date', "2015-10-01")
self.assert_json_equal('', 'callfunds/@10/date', "2015-11-01")
self.assert_json_equal('', 'callfunds/@11/date', "2015-12-01")
self.assert_json_equal('', 'callfunds/@0/total', 91.66)
self.assert_json_equal('', 'callfunds/@1/total', 91.66)
self.assert_json_equal('', 'callfunds/@2/total', 91.66)
self.assert_json_equal('', 'callfunds/@3/total', 91.66)
self.assert_json_equal('', 'callfunds/@4/total', 91.66)
self.assert_json_equal('', 'callfunds/@5/total', 91.66)
self.assert_json_equal('', 'callfunds/@6/total', 91.66)
self.assert_json_equal('', 'callfunds/@7/total', 91.66)
self.assert_json_equal('', 'callfunds/@8/total', 91.66)
self.assert_json_equal('', 'callfunds/@9/total', 91.66)
self.assert_json_equal('', 'callfunds/@10/total', 91.66)
self.assert_json_equal('', 'callfunds/@11/total', 91.66)
self.assertEqual(len(self.json_actions), 1)
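
    # Validate a current call of funds: per-owner calls, printing, generated
    # accounting entries and a first payoff.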
def test_valid_current(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 1, 'price': '250.00', 'designation': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 2, 'price': '25.00', 'designation': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
CallFunds.objects.get(id=2).calldetail_set.first().entry.closed()
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'CONFIRME': 'YES', "callfunds": 2}, False)
self.assert_observer('core.exception', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_count_equal('', 11)
self.assertEqual(len(self.json_actions), 3)
self.assert_count_equal('calldetail', 1) # nb=6
self.assert_count_equal('#calldetail/actions', 0)
self.assert_json_equal('', '#calldetail/headers/@3/@0', 'total_amount')
self.assert_json_equal('', '#calldetail/headers/@3/@2', 'C2EUR')
self.assert_json_equal('', '#calldetail/headers/@6/@0', 'price')
self.assert_json_equal('', '#calldetail/headers/@6/@2', 'C2EUR')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@0/set', "[1] AAA")
self.assert_json_equal('', 'calldetail/@0/designation', "set 1")
self.assert_json_equal('', 'calldetail/@0/total_amount', 250.00)
self.assert_json_equal('', 'calldetail/@0/set.total_part', "100")
self.assert_json_equal('', 'calldetail/@0/owner_part', "35.00")
self.assert_json_equal('', 'calldetail/@0/price', 87.50)
self.assert_count_equal('payoff', 0)
self.assert_count_equal('#payoff/actions', 0)
self.assert_json_equal('LABELFORM', 'status', 1)
self.assert_json_equal('LABELFORM', 'total', 87.50)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
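        # Per-owner amounts follow the ownership shares quoted in the comments below
        # (45% / 35% / 20% on set 1 and 75% / 0% / 25% on set 2):
        #   Minimum:        250 * 0.45 + 25 * 0.75 = 131.25
        #   Dalton William: 250 * 0.35 + 25 * 0.00 =  87.50
        #   Dalton Joe:     250 * 0.20 + 25 * 0.25 =  56.25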
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 250*45%+25*75%
self.assert_json_equal('', 'callfunds/@0/total', 131.25)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 250*35%+25*0%
self.assert_json_equal('', 'callfunds/@1/total', 87.50)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 250*20%+25*25%
self.assert_json_equal('', 'callfunds/@2/total', 56.25)
self.factory.xfer = CallFundsPrint()
self.calljson('/diacamma.condominium/callFundsPrint', {'callfunds': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsPrint')
self.assert_json_equal('SELECT', 'MODEL', '8')
self.factory.xfer = CallFundsPrint()
self.calljson('/diacamma.condominium/callFundsPrint', {'callfunds': 3, 'PRINT_MODE': 0, 'MODEL': 8}, False)
self.assert_observer('core.print', 'diacamma.condominium', 'callFundsPrint')
self.save_pdf()
check_pdfreport(self, 'CallFundsSupporting', 5, False) # CallFunds #3 => CallFundsSupporting #5
self.factory.xfer = CallFundsPrint()
self.calljson('/diacamma.condominium/callFundsPrint', {'callfunds': 3, 'PRINT_PERSITENT': True, 'PRINT_MODE': 0, 'MODEL': 8}, False)
self.assert_observer('core.print', 'diacamma.condominium', 'callFundsPrint')
self.save_pdf()
check_pdfreport(self, 'CallFundsSupporting', 5, True) # CallFunds #3 => CallFundsSupporting #5
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 3, 'TRANSITION': 'close'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'CONFIRME': 'YES', "callfunds": 3}, False)
self.assert_observer('core.exception', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 2)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[4501 Minimum]')
self.assert_json_equal('', 'entryline/@1/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@1/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@2/costaccounting', '[2] BBB 2015')
self.assert_json_equal('', 'entryline/@2/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[4501 Dalton William]')
self.assert_json_equal('', 'entryline/@4/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@4/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[4501 Dalton Joe]')
self.assert_json_equal('', 'entryline/@6/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@6/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@7/costaccounting', '[2] BBB 2015')
self.assert_json_equal('', 'entryline/@7/entry_account', '[701] 701')
self.assert_json_equal('LABELFORM', 'result', [275.00, 0.00, 275.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 10)
self.assert_json_equal('', 'entryline/@8/entry_account', '[4501 Minimum]')
self.assert_json_equal('', 'entryline/@8/costaccounting', None)
self.assert_json_equal('', 'entryline/@9/entry_account', '[531] 531')
self.assert_json_equal('', 'entryline/@9/costaccounting', None)
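
    # Validate an exceptional call of funds: entries hit accounts 120/4502,
    # then a payoff is recorded.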
def test_valid_exceptional(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 3, 'price': '250.00', 'comment': 'set 3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge exceptionnelle')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 250*45%
self.assert_json_equal('', 'callfunds/@0/total', 112.50)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 250*35%
self.assert_json_equal('', 'callfunds/@1/total', 87.50)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 250*20%
self.assert_json_equal('', 'callfunds/@2/total', 50.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/entry_account', '[4502 Minimum]')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[4502 Dalton William]')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[4502 Dalton Joe]')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[4502 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[531] 531')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 100.00, 0.00])
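
    # Validate an advance of funds: entries hit accounts 103/4503, then a payoff is recorded.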
def test_valid_advance(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 2, 'set': 1, 'price': '100.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'avance de fonds')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 100*45%
self.assert_json_equal('', 'callfunds/@0/total', 45.00)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 100*35%
self.assert_json_equal('', 'callfunds/@1/total', 35.00)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 100*20%
self.assert_json_equal('', 'callfunds/@2/total', 20.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[103] 103')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/entry_account', '[4503 Minimum]')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[103] 103')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[4503 Dalton William]')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[103] 103')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[4503 Dalton Joe]')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[4503 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[531] 531')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 100.00, 0.00])
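
    # Validate a works-fund call ('fonds travaux'): entries hit accounts 105/4505,
    # then a payoff is recorded.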
def test_valid_fundforworks(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 4, 'set': 1, 'price': '100.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'fonds travaux')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 100*45%
self.assert_json_equal('', 'callfunds/@0/total', 45.00)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 100*35%
self.assert_json_equal('', 'callfunds/@1/total', 35.00)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 100*20%
self.assert_json_equal('', 'callfunds/@2/total', 20.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/entry_account', '[4505 Minimum]')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[4505 Dalton William]')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[4505 Dalton Joe]')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[4505 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[531] 531')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 100.00, 0.00])
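
    # Validate a call mixing current, exceptional and works-fund details;
    # check all generated entry lines and how a partial payoff is split.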
def test_valid_multi(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'Multi'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 1, 'price': '250.00', 'designation': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 3, 'price': '100.00', 'designation': 'set 3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 4, 'set': 1, 'price': '150.00', 'designation': 'font'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_count_equal('', 11)
self.assertEqual(len(self.json_actions), 3)
self.assert_count_equal('calldetail', 3)
self.assert_count_equal('#calldetail/actions', 0)
self.assert_json_equal('', 'calldetail/@0/set', "[1] AAA")
self.assert_json_equal('', 'calldetail/@0/designation', "set 1")
self.assert_json_equal('', 'calldetail/@0/total_amount', 250.00)
self.assert_json_equal('', 'calldetail/@0/set.total_part', "100")
self.assert_json_equal('', 'calldetail/@0/owner_part', "35.00")
self.assert_json_equal('', 'calldetail/@0/price', 87.50)
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@1/set', "[3] CCC")
self.assert_json_equal('', 'calldetail/@1/designation', "set 3")
self.assert_json_equal('', 'calldetail/@1/total_amount', 100.00)
self.assert_json_equal('', 'calldetail/@1/set.total_part', "100")
self.assert_json_equal('', 'calldetail/@1/owner_part', "35.00")
self.assert_json_equal('', 'calldetail/@1/price', 35.00)
self.assert_json_equal('', 'calldetail/@1/type_call_ex', 'charge exceptionnelle')
self.assert_json_equal('', 'calldetail/@2/set', "[1] AAA")
self.assert_json_equal('', 'calldetail/@2/designation', "font")
self.assert_json_equal('', 'calldetail/@2/total_amount', 150.00)
self.assert_json_equal('', 'calldetail/@2/set.total_part', "100")
self.assert_json_equal('', 'calldetail/@2/owner_part', "35.00")
self.assert_json_equal('', 'calldetail/@2/price', 52.50)
self.assert_json_equal('', 'calldetail/@2/type_call_ex', 'fonds travaux')
self.assert_count_equal('payoff', 0)
self.assert_count_equal('#payoff/actions', 0)
self.assert_json_equal('LABELFORM', 'status', 1)
self.assert_json_equal('LABELFORM', 'total', 175.00)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 250*45% + 100*45% + 150*45%
self.assert_json_equal('', 'callfunds/@0/total', 225.00)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 250*35% + 100*35%+ 150*35%
self.assert_json_equal('', 'callfunds/@1/total', 175.00)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 250*20% + 100*20% + 150*20%
self.assert_json_equal('', 'callfunds/@2/total', 100.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 18)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[4501 Minimum]')
self.assert_json_equal('', 'entryline/@0/debit', -112.50)
self.assert_json_equal('', 'entryline/@1/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@1/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[4502 Minimum]')
self.assert_json_equal('', 'entryline/@3/debit', -45.00)
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[4505 Minimum]')
self.assert_json_equal('', 'entryline/@5/debit', -67.50)
self.assert_json_equal('', 'entryline/@6/costaccounting', None)
self.assert_json_equal('', 'entryline/@6/entry_account', '[4501 Dalton William]')
self.assert_json_equal('', 'entryline/@7/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@7/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@8/costaccounting', None)
self.assert_json_equal('', 'entryline/@8/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@9/costaccounting', None)
self.assert_json_equal('', 'entryline/@9/entry_account', '[4502 Dalton William]')
self.assert_json_equal('', 'entryline/@10/costaccounting', None)
self.assert_json_equal('', 'entryline/@10/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[4505 Dalton William]')
self.assert_json_equal('', 'entryline/@12/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[4501 Dalton Joe]')
self.assert_json_equal('', 'entryline/@13/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@13/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@14/costaccounting', None)
self.assert_json_equal('', 'entryline/@14/entry_account', '[120] 120')
self.assert_json_equal('', 'entryline/@15/costaccounting', None)
self.assert_json_equal('', 'entryline/@15/entry_account', '[4502 Dalton Joe]')
self.assert_json_equal('', 'entryline/@16/costaccounting', None)
self.assert_json_equal('', 'entryline/@16/entry_account', '[105] 105')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@17/entry_account', '[4505 Dalton Joe]')
self.assert_json_equal('LABELFORM', 'result', [250.00, 0.00, 250.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 18)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 22)
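        # The 100.00 payoff of owner "Minimum" is spread over his three open amounts
        # 112.50 / 45.00 / 67.50 (total 225.00) in proportion, i.e. 50.00 / 20.00 / 30.00.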
self.assert_json_equal('', 'entryline/@18/entry_account', '[4501 Minimum]') # 112.5 + 45 + 67.5 = 225 => 100 = 44.444444%
self.assert_json_equal('', 'entryline/@18/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/credit', 50.00)
self.assert_json_equal('', 'entryline/@19/entry_account', '[4502 Minimum]')
self.assert_json_equal('', 'entryline/@19/costaccounting', None)
self.assert_json_equal('', 'entryline/@19/credit', 20.00)
self.assert_json_equal('', 'entryline/@20/entry_account', '[4505 Minimum]')
self.assert_json_equal('', 'entryline/@20/costaccounting', None)
self.assert_json_equal('', 'entryline/@20/credit', 30.00)
self.assert_json_equal('', 'entryline/@21/entry_account', '[531] 531')
self.assert_json_equal('', 'entryline/@21/costaccounting', None)
self.assert_json_equal('', 'entryline/@21/debit', -100.00)
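
    # Follow the 4501 / 512 / 531 balances around a single 100.00 payoff
    # on a validated current call.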
def test_payoff(self):
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 1, 'price': '250.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 2, 'price': '25.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.assertEqual(-275.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0',
'payer': "Nous", 'date': '2015-04-03', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.assertEqual(-175.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(-100.00, ChartsAccount.get_current_total_from_code('531'), '531')
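
    # Payoff on a call mixing the three detail types, then ventilation of the amount
    # already paid between the owner accounts 4501 / 4502 / 4505.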
def test_payoff_multiple(self):
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4505'), '4505')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 1, 'price': '300.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 3, 'price': '200.00', 'comment': 'set 3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 4, 'set': 1, 'price': '100.00', 'designation': 'font'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.assertEqual(-300.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(-200.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(-100.00, ChartsAccount.get_current_total_from_code('4505'), '4505')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 1, 'amount': '60.0',
'payer': "Nous", 'date': '2015-04-03', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.assertEqual(-240.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(-200.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(-100.00, ChartsAccount.get_current_total_from_code('4505'), '4505')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(-60.00, ChartsAccount.get_current_total_from_code('531'), '531')
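        # PaymentVentilatePay re-spreads the 60.00 already credited to 4501 over
        # 4501 / 4502 / 4505 in proportion to the called amounts 300 : 200 : 100,
        # i.e. 30.00 / 20.00 / 10.00 (see the balance checks below).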
self.factory.xfer = PaymentVentilatePay()
self.calljson('/diacamma.condominium/paymentVentilatePay', {'CONFIRME': 'YES', 'owner': 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'paymentVentilatePay')
self.assertEqual(-270.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(-180.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(-90.00, ChartsAccount.get_current_total_from_code('4505'), '4505')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('512'), '512')
self.assertEqual(-60.00, ChartsAccount.get_current_total_from_code('531'), '531')
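
    # Send a single call of funds by email and check the message, its HTML body
    # and the attached PDF.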
def test_send(self):
from lucterios.mailing.tests import configSMTP, TestReceiver
add_test_callfunds()
configSMTP('localhost', 4025)
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('LABELFORM', 'num', 1)
self.assert_json_equal('LABELFORM', 'owner', "Minimum")
self.assertEqual(len(self.json_actions), 4)
self.assert_action_equal('POST', self.json_actions[2], ('Envoyer', 'lucterios.mailing/images/email.png', 'diacamma.condominium', 'callFundsPayableEmail', 0, 1, 0))
self.factory.xfer = CallFundsPayableEmail()
self.calljson('/diacamma.condominium/callFundsPayableEmail', {'callfunds': 2}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsPayableEmail')
self.assertEqual(self.response_json['action']['id'], "diacamma.payoff/payableEmail")
self.assertEqual(self.response_json['action']['params'], {'item_name': 'callfunds', 'modelname': 'condominium.CallFunds'})
server = TestReceiver()
server.start(4025)
try:
self.assertEqual(0, server.count())
self.factory.xfer = PayableEmail()
self.calljson('/diacamma.payoff/payableEmail',
{'item_name': 'callfunds', 'callfunds': 2, 'modelname': 'condominium.CallFunds'}, False)
self.assert_observer('core.custom', 'diacamma.payoff', 'payableEmail')
self.assert_count_equal('', 6)
self.factory.xfer = PayableEmail()
self.calljson('/diacamma.payoff/payableEmail',
{'callfunds': 2, 'OK': 'YES', 'item_name': 'callfunds', 'modelname': 'condominium.CallFunds', 'subject': 'my call of funds', 'message': 'this is a call of funds.', 'model': 8}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payableEmail')
self.assertEqual(1, server.count())
self.assertEqual('mr-sylvestre@worldcompany.com', server.get(0)[1])
self.assertEqual(['Minimum@worldcompany.com', 'mr-sylvestre@worldcompany.com'], server.get(0)[2])
msg, msg_txt, msg_file = server.check_first_message('my call of funds', 3, {'To': 'Minimum@worldcompany.com'})
self.assertEqual('text/plain', msg_txt.get_content_type())
self.assertEqual('text/html', msg.get_content_type())
self.assertEqual('base64', msg.get('Content-Transfer-Encoding', ''))
self.assertEqual('<html>this is a call of funds.</html>', decode_b64(msg.get_payload()))
self.assertIn('appel_de_fonds_N%C2%B01_Minimum.pdf', msg_file.get('Content-Type', ''))
self.save_pdf(base64_content=msg_file.get_payload())
finally:
server.stop()
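
    # Bulk email sending for several calls of funds; the leading underscore keeps
    # this scenario out of the default test run.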
def _test_multi_send(self):
from lucterios.mailing.tests import configSMTP, TestReceiver
add_test_callfunds()
configSMTP('localhost', 4125)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/id', 2)
self.assert_json_equal('', 'callfunds/@1/id', 3)
self.assert_json_equal('', 'callfunds/@2/id', 4)
self.assert_count_equal("#callfunds/actions", 4)
self.assert_action_equal("#callfunds/actions/@2", ('Envoyer', 'lucterios.mailing/images/email.png', 'diacamma.condominium', 'callFundsPayableEmail', 0, 1, 2))
self.factory.xfer = CallFundsPayableEmail()
self.calljson('/diacamma.condominium/callFundsPayableEmail', {'callfunds': '2;3;4'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsPayableEmail')
self.assertEqual(self.response_json['action']['id'], "diacamma.payoff/payableEmail")
self.assertEqual(self.response_json['action']['params'], {'item_name': 'callfunds', 'modelname': 'condominium.CallFunds'})
server = TestReceiver()
server.start(4125)
try:
self.assertEqual(0, server.count())
self.factory.xfer = PayableEmail()
self.calljson('/diacamma.payoff/payableEmail',
{'item_name': 'callfunds', 'callfunds': '2;3;4', 'modelname': 'condominium.CallFunds'}, False)
self.assert_observer('core.custom', 'diacamma.payoff', 'payableEmail')
self.assert_count_equal('', 7)
self.assert_json_equal('LABELFORM', "nb_item", '3')
self.factory.xfer = PayableEmail()
self.calljson('/diacamma.payoff/payableEmail',
{'callfunds': '2;3;4', 'OK': 'YES', 'item_name': 'callfunds', 'modelname': 'condominium.CallFunds', 'subject': '#reference', 'message': 'this is a call of funds.', 'model': 8}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payableEmail')
email_msg = Message.objects.get(id=1)
self.assertEqual(email_msg.subject, '#reference')
self.assertEqual(email_msg.body, 'this is a call of funds.')
self.assertEqual(email_msg.status, 2)
self.assertEqual(email_msg.recipients, "condominium.CallFunds id||8||2;3;4\n")
self.assertEqual(email_msg.email_to_send, "condominium.CallFunds:2:8\ncondominium.CallFunds:3:8\ncondominium.CallFunds:4:8")
self.assertEqual(1, len(LucteriosScheduler.get_list()))
LucteriosScheduler.stop_scheduler()
email_msg.sendemail(10, "http://testserver")
self.assertEqual(3, server.count())
self.assertEqual(['Minimum@worldcompany.com', 'mr-sylvestre@worldcompany.com'], server.get(0)[2])
_msg, _msg_txt, msg_file = server.get_msg_index(0, "=?utf-8?q?appel_de_fonds_N=C2=B01?=")
self.save_pdf(base64_content=msg_file.get_payload(), ident=1)
self.assertEqual(['William.Dalton@worldcompany.com', 'mr-sylvestre@worldcompany.com'], server.get(1)[2])
_msg, _msg_txt, msg_file = server.get_msg_index(1, "=?utf-8?q?appel_de_fonds_N=C2=B01?=")
self.save_pdf(base64_content=msg_file.get_payload(), ident=2)
self.assertEqual(['Joe.Dalton@worldcompany.com', 'mr-sylvestre@worldcompany.com'], server.get(2)[2])
_msg, _msg_txt, msg_file = server.get_msg_index(2, "=?utf-8?q?appel_de_fonds_N=C2=B01?=")
self.save_pdf(base64_content=msg_file.get_payload(), ident=3)
finally:
server.stop()
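
    # Delete validated calls of funds and check how the related payoffs and
    # owner account balances are affected.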
def test_delete(self):
# check initial empty
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Minimum')
self.assert_count_equal('payoff', 0)
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Dalton Joe')
self.assert_count_equal('payoff', 0)
# add calls of funds and payoff
add_test_callfunds(False, True)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supportings': "3;6;9", 'repartition': 1, 'NO_REPARTITION': 'yes', 'amount': '70.0',
'payer': "Nous", 'date': '2015-04-03', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
# check values
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 6)
self.assert_json_equal('', 'callfunds/@0/id', 2)
self.assert_json_equal('', 'callfunds/@0/num', 1)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum")
self.assert_json_equal('', 'callfunds/@0/total', 131.25)
self.assert_json_equal('', 'callfunds/@0/supporting.total_rest_topay', 31.25)
self.assert_json_equal('', 'callfunds/@1/id', 3)
self.assert_json_equal('', 'callfunds/@1/num', 1)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William")
self.assert_json_equal('', 'callfunds/@1/total', 87.5)
self.assert_json_equal('', 'callfunds/@1/supporting.total_rest_topay', 87.5)
self.assert_json_equal('', 'callfunds/@2/id', 4)
self.assert_json_equal('', 'callfunds/@2/num', 1)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe")
self.assert_json_equal('', 'callfunds/@2/total', 56.25)
self.assert_json_equal('', 'callfunds/@2/supporting.total_rest_topay', 0.0)
self.assert_json_equal('', 'callfunds/@3/id', 6)
self.assert_json_equal('', 'callfunds/@3/num', 2)
self.assert_json_equal('', 'callfunds/@3/owner', "Minimum")
self.assert_json_equal('', 'callfunds/@3/total', 45.0)
self.assert_json_equal('', 'callfunds/@3/supporting.total_rest_topay', 15.0)
self.assert_json_equal('', 'callfunds/@4/id', 7)
self.assert_json_equal('', 'callfunds/@4/num', 2)
self.assert_json_equal('', 'callfunds/@4/owner', "Dalton William")
self.assert_json_equal('', 'callfunds/@4/total', 35.0)
self.assert_json_equal('', 'callfunds/@4/supporting.total_rest_topay', 35.0)
self.assert_json_equal('', 'callfunds/@5/id', 8)
self.assert_json_equal('', 'callfunds/@5/num', 2)
self.assert_json_equal('', 'callfunds/@5/owner', "Dalton Joe")
self.assert_json_equal('', 'callfunds/@5/total', 20.0)
self.assert_json_equal('', 'callfunds/@5/supporting.total_rest_topay', 6.25)
self.assertEqual(-275.00 + 100 + 56.25, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(-100.00 + 30 + 13.75, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(-200.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Minimum')
self.assert_count_equal('payoff', 0)
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Dalton Joe')
self.assert_count_equal('payoff', 0)
        # test deletion of the calls of funds
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'callfunds': '2;4'}, False)
self.assert_observer('core.dialogbox', 'diacamma.condominium', 'callFundsDel')
self.assert_json_equal('', 'text', "Voulez vous supprimer les appels de fonds N°1 et les suivants ?")
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'callfunds': '2;4', 'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 2)
self.assert_json_equal('', 'callfunds/@0/total', 275.0)
self.assert_json_equal('', 'callfunds/@1/total', 100.0)
self.assertEqual(200.00, ChartsAccount.get_current_total_from_code('4501'), '4501')
self.assertEqual(0.00, ChartsAccount.get_current_total_from_code('4502'), '4502')
self.assertEqual(-200.00, ChartsAccount.get_current_total_from_code('531'), '531')
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Minimum')
self.assert_count_equal('payoff', 2)
self.assert_json_equal('', 'payoff/@0/amount', 100.0)
self.assert_json_equal('', 'payoff/@1/amount', 30.0)
self.factory.xfer = OwnerShow()
self.calljson('/diacamma.condominium/ownerShow', {'owner': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'ownerShow')
self.assert_json_equal('LABELFORM', 'third', 'Dalton Joe')
self.assert_count_equal('payoff', 1)
self.assert_json_equal('', 'payoff/@0/amount', 70.0)
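# The Belgian variant below replays the call-of-funds scenarios with the Belgian
# accounting defaults (default_compta_be, default_setowner_be, ...), so owner and
# revenue accounts appear as 410000/410100 and 700000/701100/701200 in the ledger
# assertions instead of the French codes used above.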
class CallFundsBelgiumTest(LucteriosTest):
def setUp(self):
LucteriosTest.setUp(self)
default_compta_be(with12=False)
initial_thirds_be()
default_bankaccount_be()
default_setowner_be()
rmtree(get_user_dir(), True)
def test_create(self):
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total"}, 0)
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsAddModify')
self.assert_count_equal('', 4)
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total", "supporting.total_rest_topay": "reste à payer"}, 0)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_grid_equal('callfunds', {"num": "N°", "date": "date", "owner": "propriétaire", "comment": "commentaire", "total": "total"}, 1)
self.factory.xfer = CallFundsDel()
self.calljson('/diacamma.condominium/callFundsDel', {'CONFIRME': 'YES', "callfunds": 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsDel')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
def test_add(self):
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_count_equal('', 8)
self.assertEqual(len(self.json_actions), 2)
self.assert_grid_equal('calldetail', {"type_call_ex": "type d'appel", "set": "catégorie de charges", "designation": "désignation", "set.total_part": "somme des tantièmes", "price": "montant", }, 0)
self.assert_count_equal('#calldetail/actions', 3)
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_count_equal('', 5)
self.assert_json_equal('SELECT', 'set', '1')
self.assert_json_equal('FLOAT', 'price', '100.00')
self.assert_select_equal('type_call', {0: 'charge courante', 1: 'charge de travaux', 2: 'roulement', 4: 'réserve'})
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'callfunds': 1, 'type_call': 0, 'set': 2}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callDetailAddModify')
self.assert_json_equal('SELECT', 'set', '2')
self.assert_json_equal('FLOAT', 'price', '10.00')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 1, 'price': '100.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 2, 'price': '10.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assertEqual(len(self.json_actions), 3)
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@0/set', '[1] AAA')
self.assert_json_equal('', 'calldetail/@0/price', 100.00)
self.assert_json_equal('', 'calldetail/@1/type_call_ex', 'charge courante')
self.assert_json_equal('', 'calldetail/@1/set', '[2] BBB')
self.assert_json_equal('', 'calldetail/@1/price', 10.00)
self.assert_json_equal('LABELFORM', 'total', 110.00)
def test_add_default_current(self):
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@0/total', 110.00)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 2)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@0/total', 110.00)
self.assert_json_equal('', 'callfunds/@1/date', "2015-02-01")
self.assert_json_equal('', 'callfunds/@1/total', 110.00)
self.assertEqual(len(self.json_actions), 2)
setitem = Set.objects.get(id=1)
year = FiscalYear.get_current()
cost = setitem.current_cost_accounting
Budget.objects.create(cost_accounting=cost, year=year, code='610', amount=1200)
setitem.change_budget_product(cost, year.id)
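        # After adding a 1200.00 budget on account 610 for category AAA, each of the
        # ten remaining monthly calls (March to December) is expected to grow by
        # 120.00, from 110.00 to 230.00, as checked below.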
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 12)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@0/total', 110.00)
self.assert_json_equal('', 'callfunds/@1/date', "2015-02-01")
self.assert_json_equal('', 'callfunds/@1/total', 110.00)
self.assert_json_equal('', 'callfunds/@2/date', "2015-03-01")
self.assert_json_equal('', 'callfunds/@2/total', 230.00)
self.assert_json_equal('', 'callfunds/@3/date', "2015-04-01")
self.assert_json_equal('', 'callfunds/@3/total', 230.00)
self.assert_json_equal('', 'callfunds/@4/date', "2015-05-01")
self.assert_json_equal('', 'callfunds/@4/total', 230.00)
self.assert_json_equal('', 'callfunds/@5/date', "2015-06-01")
self.assert_json_equal('', 'callfunds/@5/total', 230.00)
self.assert_json_equal('', 'callfunds/@6/date', "2015-07-01")
self.assert_json_equal('', 'callfunds/@6/total', 230.00)
self.assert_json_equal('', 'callfunds/@7/date', "2015-08-01")
self.assert_json_equal('', 'callfunds/@7/total', 230.00)
self.assert_json_equal('', 'callfunds/@8/date', "2015-09-01")
self.assert_json_equal('', 'callfunds/@8/total', 230.00)
self.assert_json_equal('', 'callfunds/@9/date', "2015-10-01")
self.assert_json_equal('', 'callfunds/@9/total', 230.00)
self.assert_json_equal('', 'callfunds/@10/date', "2015-11-01")
self.assert_json_equal('', 'callfunds/@10/total', 230.0)
self.assert_json_equal('', 'callfunds/@11/date', "2015-12-01")
self.assert_json_equal('', 'callfunds/@11/total', 230.0)
self.assertEqual(len(self.json_actions), 1)
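    # Same scenario with the 'condominium-mode-current-callfunds' parameter set to
    # the quarterly mode: only four calls are generated (January, April, July and
    # October), each for a quarter of the yearly amount (330.00 = 3 x 110.00).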
def test_add_default_current_quartly(self):
Parameter.change_value('condominium-mode-current-callfunds', 0)
Params.clear()
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 0)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsAddCurrent()
self.calljson('/diacamma.condominium/callFundsAddCurrent', {'CONFIRME': 'YES'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddCurrent')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 4)
self.assert_json_equal('', 'callfunds/@0/date', "2015-01-01")
self.assert_json_equal('', 'callfunds/@0/total', 330.00)
self.assert_json_equal('', 'callfunds/@1/date', "2015-04-01")
self.assert_json_equal('', 'callfunds/@1/total', 330.00)
self.assert_json_equal('', 'callfunds/@2/date', "2015-07-01")
self.assert_json_equal('', 'callfunds/@2/total', 330.00)
self.assert_json_equal('', 'callfunds/@3/date', "2015-10-01")
self.assert_json_equal('', 'callfunds/@3/total', 330.00)
self.assertEqual(len(self.json_actions), 1)
def test_valid_current(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 1, 'price': '250.00', 'designation': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, 'type_call': 0, 'set': 2, 'price': '25.00', 'designation': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 3}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_count_equal('', 11)
self.assertEqual(len(self.json_actions), 3)
self.assert_count_equal('calldetail', 1) # nb=6
self.assert_count_equal('#calldetail/actions', 0)
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge courante')
self.assert_count_equal('payoff', 0)
self.assert_count_equal('#payoff/actions', 0)
self.assert_json_equal('LABELFORM', 'status', 1)
self.assert_json_equal('LABELFORM', 'total', 87.50)
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 250*45%+25*75%
self.assert_json_equal('', 'callfunds/@0/total', 131.25)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 250*35%+25*0%
self.assert_json_equal('', 'callfunds/@1/total', 87.50)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 250*20%+25*25%
self.assert_json_equal('', 'callfunds/@2/total', 56.25)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 3, 'TRANSITION': 'close'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[410100 Minimum]')
self.assert_json_equal('', 'entryline/@1/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@1/entry_account', '[701100] 701100')
self.assert_json_equal('', 'entryline/@2/costaccounting', '[2] BBB 2015')
self.assert_json_equal('', 'entryline/@2/entry_account', '[701100] 701100')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[410100 Dalton William]')
self.assert_json_equal('', 'entryline/@4/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@4/entry_account', '[701100] 701100')
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[410100 Dalton Joe]')
self.assert_json_equal('', 'entryline/@6/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@6/entry_account', '[701100] 701100')
self.assert_json_equal('', 'entryline/@7/costaccounting', '[2] BBB 2015')
self.assert_json_equal('', 'entryline/@7/entry_account', '[701100] 701100')
self.assert_json_equal('LABELFORM', 'result', [275.00, 0.00, 275.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 2, 'reference': 'abc', 'bank_account': 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 10)
self.assert_json_equal('', 'entryline/@8/entry_account', '[410100 Minimum]')
self.assert_json_equal('', 'entryline/@8/costaccounting', None)
self.assert_json_equal('', 'entryline/@9/entry_account', '[550000] 550000')
self.assert_json_equal('', 'entryline/@9/costaccounting', None)
def test_valid_working(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 3, 'price': '250.00', 'comment': 'set 3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'charge de travaux')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 250*45%
self.assert_json_equal('', 'callfunds/@0/total', 112.50)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 250*35%
self.assert_json_equal('', 'callfunds/@1/total', 87.50)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 250*20%
self.assert_json_equal('', 'callfunds/@2/total', 50.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[410000 Minimum]')
self.assert_json_equal('', 'entryline/@1/costaccounting', '[3] CCC')
self.assert_json_equal('', 'entryline/@1/entry_account', '[700100] 700100')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[410000 Dalton William]')
self.assert_json_equal('', 'entryline/@3/costaccounting', '[3] CCC')
self.assert_json_equal('', 'entryline/@3/entry_account', '[700100] 700100')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[410000 Dalton Joe]')
self.assert_json_equal('', 'entryline/@5/costaccounting', '[3] CCC')
self.assert_json_equal('', 'entryline/@5/entry_account', '[700100] 700100')
self.assert_json_equal('LABELFORM', 'result', [250.00, 0.00, 250.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 2, 'reference': 'abc', 'bank_account': 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[410000 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[550000] 550000')
self.assert_json_equal('LABELFORM', 'result', [250.00, 0.00, 250.00, 100.00, 0.00])
def test_valid_rolling(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 2, 'set': 1, 'price': '100.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'roulement')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 100*45%
self.assert_json_equal('', 'callfunds/@0/total', 45.00)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 100*35%
self.assert_json_equal('', 'callfunds/@1/total', 35.00)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 100*20%
self.assert_json_equal('', 'callfunds/@2/total', 20.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[410100 Minimum]')
self.assert_json_equal('', 'entryline/@1/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@1/entry_account', '[701200] 701200')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[410100 Dalton William]')
self.assert_json_equal('', 'entryline/@3/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@3/entry_account', '[701200] 701200')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[410100 Dalton Joe]')
self.assert_json_equal('', 'entryline/@5/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@5/entry_account', '[701200] 701200')
self.assert_json_equal('LABELFORM', 'result', [100.00, 0.00, 100.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 2, 'reference': 'abc', 'bank_account': 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[410100 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[550000] 550000')
self.assert_json_equal('LABELFORM', 'result', [100.00, 0.00, 100.00, 100.00, 0.00])
def test_valid_reserved(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 4, 'set': 1, 'price': '100.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsShow()
self.calljson('/diacamma.condominium/callFundsShow', {'callfunds': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsShow')
self.assert_json_equal('', 'calldetail/@0/type_call_ex', 'réserve')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 0}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 1)
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = CallFundsList()
self.calljson('/diacamma.condominium/callFundsList', {'status_filter': 1}, False)
self.assert_observer('core.custom', 'diacamma.condominium', 'callFundsList')
self.assert_count_equal('callfunds', 3)
self.assert_json_equal('', 'callfunds/@0/owner', "Minimum") # 100*45%
self.assert_json_equal('', 'callfunds/@0/total', 45.00)
self.assert_json_equal('', 'callfunds/@1/owner', "Dalton William") # 100*35%
self.assert_json_equal('', 'callfunds/@1/total', 35.00)
self.assert_json_equal('', 'callfunds/@2/owner', "Dalton Joe") # 100*20%
self.assert_json_equal('', 'callfunds/@2/total', 20.00)
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[410000 Minimum]')
self.assert_json_equal('', 'entryline/@1/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@1/entry_account', '[700000] 700000')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[410000 Dalton William]')
self.assert_json_equal('', 'entryline/@3/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@3/entry_account', '[700000] 700000')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[410000 Dalton Joe]')
self.assert_json_equal('', 'entryline/@5/costaccounting', '[1] AAA 2015')
self.assert_json_equal('', 'entryline/@5/entry_account', '[700000] 700000')
self.assert_json_equal('LABELFORM', 'result', [100.00, 0.00, 100.00, 0.00, 0.00])
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '3', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 6)
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 2, 'reference': 'abc', 'bank_account': 1}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 8)
self.assert_json_equal('', 'entryline/@6/entry_account', '[410000 Minimum]')
self.assert_json_equal('', 'entryline/@7/entry_account', '[550000] 550000')
self.assert_json_equal('LABELFORM', 'result', [100.00, 0.00, 100.00, 100.00, 0.00])
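# The suite below re-runs the validation scenarios with the legacy accounting mode
# (old_accounting()): validating a call of funds no longer generates ledger entries,
# owners are tracked on the single 450 account, and only the payoff produces entry
# lines.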
class CallFundsTestOldAccounting(LucteriosTest):
def setUp(self):
initial_thirds_fr()
old_accounting()
LucteriosTest.setUp(self)
default_compta_fr(with12=False)
default_costaccounting()
default_bankaccount_fr()
default_setowner_fr()
rmtree(get_user_dir(), True)
def test_valid_current(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 1, 'price': '250.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 0, 'set': 2, 'price': '25.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry_account', '[450 Minimum]')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 100.00, 0.00])
def test_valid_exceptional(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 1, 'price': '250.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 1, 'set': 2, 'price': '25.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry_account', '[450 Minimum]')
self.assert_json_equal('LABELFORM', 'result',
[0.00, 0.00, 0.00, 100.00, 0.00])
def test_valid_advance(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = CallFundsAddModify()
self.calljson('/diacamma.condominium/callFundsAddModify', {'SAVE': 'YES', "date": '2015-06-10', "comment": 'abc 123'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 2, 'set': 1, 'price': '250.00', 'comment': 'set 1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallDetailAddModify()
self.calljson('/diacamma.condominium/callDetailAddModify', {'SAVE': 'YES', 'callfunds': 1, "type_call": 2, 'set': 2, 'price': '25.00', 'comment': 'set 2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callDetailAddModify')
self.factory.xfer = CallFundsTransition()
self.calljson('/diacamma.condominium/callFundsTransition', {'CONFIRME': 'YES', 'callfunds': 1, 'TRANSITION': 'valid'}, False)
self.assert_observer('core.acknowledge', 'diacamma.condominium', 'callFundsTransition')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 0)
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
self.factory.xfer = PayoffAddModify()
self.calljson('/diacamma.payoff/payoffAddModify', {'SAVE': 'YES', 'supporting': 4, 'amount': '100.0', 'payer': "Minimum", 'date': '2015-06-12', 'mode': 0, 'reference': 'abc', 'bank_account': 0}, False)
self.assert_observer('core.acknowledge', 'diacamma.payoff', 'payoffAddModify')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry_account', '[450 Minimum]')
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 100.00, 0.00])
| Diacamma2/syndic | diacamma/condominium/tests_callfunds.py | Python | gpl-3.0 | 117,010 | ["Dalton"] | 61ab34525f10c9102259e3f7f48956b915ba18f03882502a0d47d2ae541381f5 |
from __future__ import division
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
import matplotlib.path as path
import subprocess
plt.style.use('classic')
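# This script animates the low-ability ("type L") worker's utility in an
# education-signaling model (in the spirit of Spence): as the hiring threshold
# e-bar sweeps across the education grid, the worker is paid the low wage below
# the threshold and the high wage at or above it. The result is written to
# ../video/signalingSeparatingLow.mp4 and then converted to Ogg with ffmpeg.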
# Parameters for animation
aL=4          # productivity (wage) of the low-ability type, m_L in the plot legend
cH=0.25       # education cost coefficient of the high-ability type
lam=2         # productivity ratio between high and low types
eta=2         # cost ratio between low and high types
aH=lam*aL     # productivity (wage) of the high-ability type, m_H
cL=eta*cH     # education cost coefficient of the low-ability type
p = 0.25      # share of high-ability workers
aBar = p*aH+(1-p)*aL  # average productivity across types (not used in this animation)
eMin = 0      # education grid for plotting
eMax = 16
eStep=0.05
eRange = np.arange(eMin-eStep,eMax+eStep,eStep)
# Low type
eLstar = aL/2/cL                    # optimal education absent signaling (argmax of aL*e - cL*e**2)
uLstar = aL*eLstar-cL*eLstar**2     # utility obtained at eLstar
coeffsL = [-cL,aH,-uLstar]          # roots of aH*e - cL*e**2 = uLstar give the education level
eLmax = np.max(np.roots(coeffsL))   # beyond which mimicking the high wage no longer pays
costL = cL*eRange**2                # education cost over the grid
uLowL = aL*eRange-costL             # utility when paid the low wage
uHighL= aH*eRange-costL             # utility when paid the high wage
yMinL,yMaxL = -10,33                # y-axis limits
##########################################
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=6, metadata=dict(artist='Brian C Jenkins'), bitrate=1000)
fig = plt.figure(figsize=(16,9))
ax1 = fig.add_subplot(1, 1, 1)
ax1.grid()
plt.plot(eRange,uHighL,'--',lw=2,color='b')
plt.plot(eRange,uLowL,'--',lw=2,color='r')
line1, = ax1.plot([], [],'k', lw=3)
line2, = ax1.plot([], [],'k', lw=3)
line3, = ax1.plot([], [],'ok', lw=4)
ax1.set_xlim(eMin, eMax)
ax1.set_ylim(yMinL,yMaxL)
ax1.set_xlabel('Level of education ($e$)')
ax1.set_ylabel('\n\n')
ax1.legend(['$m_He - c_L e^2$','$m_L e - c_L e^2$','$u_L(e)$'],ncol=1,fontsize=20,loc='center left', bbox_to_anchor=(1, 0.5))
ax1.set_title('Type L Worker Utility ($u_L)$',fontsize=20,pad = 10)
# Initialize the shaded rectangle
left = 0
right = 0
bottom = yMinL
top = bottom - yMinL + yMaxL
nverts = 5
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0] = path.Path.MOVETO
codes[4] = path.Path.CLOSEPOLY
verts[0,0] = left
verts[0,1] = bottom
verts[1,0] = left
verts[1,1] = top
verts[2,0] = right
verts[2,1] = top
verts[3,0] = right
verts[3,1] = bottom
rectPath = path.Path(verts, codes)
patch = patches.PathPatch(rectPath, facecolor='green', edgecolor='green', alpha=0.5)
ax1.add_patch(patch)
fig.tight_layout()
##########################################
z,n=10000,0  # z: running counter for optionally saved frame images; n: current index into eRange
def run(*args):
global z,n
e1 = 0.25
e = eRange[n]
util =[]
for m,x in enumerate(eRange):
if x<e:
util.append(uLowL[m])
else:
util.append(uHighL[m])
if e<e1:
ax1.set_xticks([eRange[n]])
ax1.set_yticks([0,aH*e - costL[n]])
xlabels = ['$\\bar{e}$'] #% np.round(eRange[n],1)
ylabels = ['','$u_L(\\bar{e})$'] #% np.round(eRange[n],1)
elif e1<=e<eLstar:
ax1.set_xticks([eRange[n]])
ax1.set_yticks([0,aH*e - costL[n]])
xlabels = ['$\\bar{e}$'] #% np.round(eRange[n],1)
ylabels = ['','$u_L(\\bar{e})$'] #% np.round(eRange[n],1)
elif eLstar<=e<eLmax:
ax1.set_xticks([eLstar,eRange[n]])
ax1.set_yticks([0,uLstar,aH*e - costL[n]])
xlabels = ['$e_L^*$','$\\bar{e}$'] #% np.round(eRange[n],1)
ylabels = ['','$u_L(e_L^*)$','$u_L(\\bar{e})$'] #% np.round(eRange[n],1)
else:
ax1.set_xticks([eLstar,eLmax,eRange[n]])
ax1.set_yticks([0,uLstar,aH*e - costL[n]])
xlabels = ['$e_L^*$','$\\bar{e}_{min}$','$\\bar{e}$'] #% np.round(eRange[n],1)
ylabels = ['','$u_L(e_L^*)$','$u_L(\\bar{e})$'] #% np.round(eRange[n],1)
left = eLmax
right = e
verts[0,0] = left
verts[1,0] = left
verts[2,0] = right
verts[3,0] = right
eRange1=eRange[0:n]
util1 = util[:n]
eRange2=eRange[n:]
util2 = util[n:]
eRange3=eRange[n]
util3=util[n]
# fig.savefig("%d.png"%z)
z+=1
    if n==len(eRange)-1:
        n = len(eRange)-1  # hold the animation at the final frame once the sweep is complete
    else:
        n+=1
ax1.set_xticklabels(xlabels,fontsize=20)
ax1.set_yticklabels(ylabels,fontsize=20)
line1.set_data(eRange1, util1)
line2.set_data(eRange2, util2)
line3.set_data(eRange3, util3)
return line1,line2,line3
ani = animation.FuncAnimation(fig, run, eRange, blit=False,repeat=True,interval=1)
ani.save('../video/signalingSeparatingLow.mp4',writer=writer)
plt.show()
# Convert the mp4 video to ogg format
makeOgg = 'ffmpeg -i ../video/signalingSeparatingLow.mp4 -c:v libtheora -c:a libvorbis -q:v 6 -q:a 5 ../video/signalingSeparatingLow.ogv'
subprocess.call(makeOgg,shell=True)
| letsgoexploring/signalingAnimation | code/signalingAnimationSeparatingLowType.py | Python | mit | 4,284 | ["Brian"] | 47828f09b93a4b7627b5f6d6c558570a1c9babb124d0dbd2b54ea8d9fde66f63 |
"""Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import, division, print_function
import ast
import _ast
import errno
import itertools
import imp
import marshal
import os
import re
import struct
import sys
import types
import py
from _pytest.assertion import util
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
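# Rewritten code objects are cached next to the source under __pycache__ with this
# tail, e.g. "foo.py" becomes "__pycache__/foo.cpython-35-PYTEST.pyc" on CPython 3.5
# (see cache_name in find_module below).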
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
if sys.version_info >= (3, 5):
ast_Call = ast.Call
else:
def ast_Call(a, b, c):
return ast.Call(a, b, c, None, None)
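# On Python < 3.5 ast.Call still carries the starargs/kwargs slots, so the shim above
# fills them with None; from 3.5 onward ast.Call can be constructed directly.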
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
self.fnpats = config.getini("python_files")
self.session = None
self.modules = {}
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def set_session(self, session):
self.session = session
def find_module(self, name, path=None):
state = self.config._assertstate
state.trace("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = py.path.local(fn)
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc, state.trace)
if co is None:
state.trace("rewriting %r" % (fn,))
source_stat, co = _rewrite_test(self.config, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, source_stat, pyc, co)
else:
state.trace("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
# always rewrite conftest files
fn = str(fn_pypath)
if fn_pypath.basename == 'conftest.py':
state.trace("rewriting conftest file: %r" % (fn,))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace("matched test file (was specified on cmdline): %r" %
(fn,))
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
for pat in self.fnpats:
if fn_pypath.fnmatch(pat):
state.trace("matched test file %r" % (fn,))
return True
for marked in self._must_rewrite:
if name.startswith(marked):
state.trace("matched marked file %r (from %r)" % (name, marked))
return True
return False
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_stat, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason to deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
state.trace("error writing pyc file at %s: errno=%s" % (pyc, err))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
try:
fp.write(imp.get_magic())
mtime = int(source_stat.mtime)
size = source_stat.size & 0xFFFFFFFF
fp.write(struct.pack("<ll", mtime, size))
marshal.dump(co, fp)
finally:
fp.close()
return True
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(config, fn):
"""Try to read and rewrite *fn* and return the code object."""
state = config._assertstate
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
if hasattr(state, "_indecode"):
# encodings imported us again, so don't rewrite.
return None, None
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
finally:
del state._indecode
# On Python versions which are not 2.7 and less than or equal to 3.1, the
# parser expects *nix newlines.
if REWRITE_NEWLINES:
source = source.replace(RN, N) + N
try:
tree = ast.parse(source)
except SyntaxError:
# Let this pop up again in the real import.
state.trace("failed to parse: %r" % (fn,))
return None, None
rewrite_asserts(tree, fn, config)
try:
co = compile(tree, fn.strpath, "exec", dont_inherit=True)
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
state.trace("failed to compile: %r" % (fn,))
return None, None
return stat, co
def _make_rewritten_pyc(state, source_stat, pyc, co):
"""Try to dump rewritten code to *pyc*."""
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, source_stat, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, source_stat, proc_pyc):
os.rename(proc_pyc, pyc)
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
mtime = int(source.mtime())
size = source.size()
data = fp.read(12)
except EnvironmentError as e:
trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
return None
# Check for invalid or out of date pyc file.
if (len(data) != 12 or data[:4] != imp.get_magic() or
struct.unpack("<ll", data[4:]) != (mtime, size)):
trace('_read_pyc(%s): invalid or out of date pyc' % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace('_read_pyc(%s): marshal.load error %s' % (source, e))
return None
if not isinstance(co, types.CodeType):
trace('_read_pyc(%s): not a code object' % source)
return None
return co
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
repr = py.io.saferepr(obj)
if py.builtin._istext(repr):
t = py.builtin.text
else:
t = py.builtin.bytes
return repr.replace(t("\n"), t("\\n"))
from _pytest.assertion.util import format_explanation as _format_explanation # noqa
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if py.builtin._istext(obj) or py.builtin._isbytes(obj):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if py.builtin._istext(s):
t = py.builtin.text
else:
t = py.builtin.bytes
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s
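# Illustrative examples (assumed, not part of the original source):
#   _format_assertmsg("first\nsecond")  -> "first\n~second"
#   _format_assertmsg("100% sure")      -> "100%% sure"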
def _should_repr_global_name(obj):
return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if py.builtin._istext(explanation):
t = py.builtin.text
else:
t = py.builtin.bytes
return explanation.replace(t('%'), t('%%'))
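# Illustrative example (assumed): _format_boolop(["a == 1", "b"], 1) returns
# "(a == 1 or b)"; with is_or=0 the operands are joined with " and " instead.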
def _call_reprcompare(ops, results, expls, each_obj):
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
unary_map = {
ast.Not: "not %s",
ast.Invert: "~%s",
ast.USub: "-%s",
ast.UAdd: "+%s"
}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
_NameConstant = ast.NameConstant
else:
def _NameConstant(c):
return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and re-write them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it re-writes the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:on_failure: The AST statements which will be executed if the
assertion test fails. This is the code which will construct
the failure message and raises the AssertionError.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .on_failure and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
hoehnp/navit_test
|
lib/python2.7/site-packages/_pytest/assertion/rewrite.py
|
Python
|
gpl-2.0
| 36,252
|
[
"VisIt"
] |
244194b301d6acae5259c25ca05493695b8e8cce328b77605f13c95d3ef10562
|
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for Worden's CN13ID Western Flyer cruise in October 2013
CN13ID: CANON 2013 Interdisciplinary
Mike McCann
MBARI 23 October 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
import time # for startdate, enddate args
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
# building input data sources object
cl = CANONLoader('stoqs_cn13id_oct2013', 'CN13ID - October 2013',
description = 'Warden cruise on Western Flyer into the California Current System off Monterey Bay',
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Globe_1m_bath_10x/Globe_1m_bath_10x_scene.x3d': {
'position': '14051448.48336 -15407886.51486 6184041.22775',
'orientation': '0.83940 0.33030 0.43164 1.44880',
'centerOfRotation': '0 0 0',
'VerticalExaggeration': '10',
}
},
grdTerrain = os.path.join(parentDir, 'Globe_1m_bath.grd')
)
# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
startdate = datetime.datetime(2013, 10, 6) # Fixed start
enddate = datetime.datetime(2013, 10, 18) # Fixed end
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
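# i.e. cl.dodsBase == 'http://odss.mbari.org/thredds/dodsC/'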
#####################################################################
# DORADO
#####################################################################
# special location for dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2013/netcdf/'
cl.dorado_files = [
'Dorado389_2013_280_01_280_01_decim.nc',
'Dorado389_2013_282_00_282_00_decim.nc',
'Dorado389_2013_283_00_283_00_decim.nc',
'Dorado389_2013_287_01_287_01_decim.nc',
]
######################################################################
# GLIDERS
######################################################################
# SPRAY glider - for just the duration of the campaign
cl.l_662_base = 'http://www.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662_files = ['OS_Glider_L_662_20130711_TS.nc']
cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
cl.l_662_startDatetime = startdate
cl.l_662_endDatetime = enddate
######################################################################
# WESTERN FLYER: October 6-17
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'CANON_october2013/Platforms/Ships/Western_Flyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
'CN13IDm01.nc', 'CN13IDm02.nc', 'CN13IDm03.nc', 'CN13IDm04.nc', 'CN13IDm05.nc', 'CN13IDm06.nc', 'CN13IDm07.nc', 'CN13IDm08.nc', 'CN13IDm09.nc', 'CN13IDm10.nc',
'CN13IDm11.nc', 'CN13IDm13.nc', 'CN13IDm13.nc', 'CN13IDm14.nc',
]
# PCTD
cl.pctdDir = 'CANON_october2013/Platforms/Ships/Western_Flyer/pctd/'
cl.wfpctd_base = cl.dodsBase + cl.pctdDir
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl' , 'oxygen']
cl.wfpctd_files = [
'CN13IDc01.nc', 'CN13IDc02.nc', 'CN13IDc03.nc', 'CN13IDc04.nc', 'CN13IDc05.nc', 'CN13IDc06.nc', 'CN13IDc07.nc', 'CN13IDc08.nc', 'CN13IDc09.nc', 'CN13IDc10.nc',
'CN13IDc11.nc', 'CN13IDc12.nc', 'CN13IDc13.nc', 'CN13IDc14.nc', 'CN13IDc15.nc', 'CN13IDc16.nc', 'CN13IDc17.nc', 'CN13IDc18.nc', 'CN13IDc19.nc', 'CN13IDc20.nc',
'CN13IDc21.nc', 'CN13IDc22.nc', 'CN13IDc23.nc', 'CN13IDc24.nc', 'CN13IDc25.nc', 'CN13IDc26.nc', 'CN13IDc27.nc', 'CN13IDc28.nc', 'CN13IDc29.nc', 'CN13IDc30.nc',
'CN13IDc31.nc', 'CN13IDc32.nc', 'CN13IDc33.nc', 'CN13IDc34.nc', 'CN13IDc35.nc', 'CN13IDc36.nc', 'CN13IDc37.nc', 'CN13IDc38.nc', 'CN13IDc39.nc', 'CN13IDc40.nc',
'CN13IDc41.nc', 'CN13IDc42.nc', 'CN13IDc43.nc', 'CN13IDc44.nc', 'CN13IDc45.nc', 'CN13IDc46.nc', 'CN13IDc47.nc', 'CN13IDc48.nc', 'CN13IDc49.nc', 'CN13IDc50.nc',
##'CN13IDc51.nc', 'CN13IDc52.nc', 'CN13IDc53.nc', 'CN13IDc54.nc',
]
# BCTD
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/GOC12/ copied to local BOG_Data dir
cl.bctdDir = 'CANON_october2013/Platforms/Ships/Western_Flyer/bctd/'
cl.subsample_csv_base = cl.dodsBase + cl.bctdDir
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data')
cl.subsample_csv_files = [
#'STOQS_canon13_CHL_1U.csv', 'STOQS_canon13_CHL_5U.csv', 'STOQS_canon13_NH4.csv', 'STOQS_canon13_NO2.csv',
#'STOQS_canon13_NO3.csv','STOQS_canon13_OXY_ML.csv', 'STOQS_canon13_PHAEO_1U.csv', 'STOQS_canon13_PHAEO_5U.csv',
#'STOQS_canon13_PHAEO_GFF.csv', 'STOQS_canon13_PO4.csv', 'STOQS_canon13_SIO4.csv', #'STOQS_canon13_CARBON_GFF.csv
#'STOQS_canon13_CHL_GFF.csv',
]
######################################################################
# MOORINGS
######################################################################
# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
'201309/OS_M1_20130918hourly_CMSTV.nc'
]
cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Mooring OA1 CTD
cl.oaDir = 'CANON_september2013/Platforms/Moorings/OA_1/'
cl.OA1ctd_base = cl.dodsBase + cl.oaDir
cl.OA1ctd_files = ['OA1_ctd_2013.nc']
cl.OA1ctd_parms = ['TEMP', 'PSAL', 'conductivity' ]
cl.OA1ctd_startDatetime = startdate
cl.OA1ctd_endDatetime = enddate
# Mooring OA1 MET
cl.OA1met_base = cl.dodsBase + cl.oaDir
cl.OA1met_files = ['OA1_met_2013.nc']
cl.OA1met_parms = ['Wind_direction','Wind_speed','Air_temperature','Barometric_pressure']
cl.OA1met_startDatetime = startdate
cl.OA1met_endDatetime = enddate
# Mooring OA1 PH
cl.OA1pH_base = cl.dodsBase + cl.oaDir
cl.OA1pH_files = ['OA1_pH_2013.nc']
cl.OA1pH_parms = ['pH' ]
cl.OA1pH_startDatetime = startdate
cl.OA1pH_endDatetime = enddate
# Mooring OA1 PCO2
cl.OA1pco2_base = cl.dodsBase + cl.oaDir
cl.OA1pco2_files = ['OA1_pco2_2013.nc']
cl.OA1pco2_parms = ['pCO2' ]
cl.OA1pco2_startDatetime = startdate
cl.OA1pco2_endDatetime = enddate
# Mooring OA1 O2
cl.OA1o2_base = cl.dodsBase + cl.oaDir
cl.OA1o2_files = ['OA1_o2_2013.nc']
cl.OA1o2_parms = ['oxygen', 'oxygen_saturation' ]
cl.OA1o2_startDatetime = startdate
cl.OA1o2_endDatetime = enddate
# Mooring OA1 Fluorescence
cl.OA1fl_base = cl.dodsBase + cl.oaDir
cl.OA1fl_files = ['OA1_fl_2013.nc']
cl.OA1fl_parms = [ 'fluor' ]
cl.OA1fl_startDatetime = startdate
cl.OA1fl_endDatetime = enddate
# Mooring OA2 CTD
cl.oaDir = 'CANON_september2013/Platforms/Moorings/OA_2/'
cl.OA2ctd_base = cl.dodsBase + cl.oaDir
cl.OA2ctd_files = ['OA2_ctd_2013.nc']
cl.OA2ctd_parms = ['TEMP', 'PSAL', 'conductivity' ]
cl.OA2ctd_startDatetime = startdate
cl.OA2ctd_endDatetime = enddate
# Mooring OA2 MET
cl.OA2met_base = cl.dodsBase + cl.oaDir
cl.OA2met_files = ['OA2_met_2013.nc']
cl.OA2met_parms = ['Wind_direction','Wind_speed','Air_temperature','Barometric_pressure']
cl.OA2met_startDatetime = startdate
cl.OA2met_endDatetime = enddate
# Mooring OA2 PH
cl.OA2pH_base = cl.dodsBase + cl.oaDir
cl.OA2pH_files = ['OA2_pH_2013.nc']
cl.OA2pH_parms = ['pH' ]
cl.OA2pH_startDatetime = startdate
cl.OA2pH_endDatetime = enddate
# Mooring OA2 PCO2
cl.OA2pco2_base = cl.dodsBase + cl.oaDir
cl.OA2pco2_files = ['OA2_pco2_2013.nc']
cl.OA2pco2_parms = ['pCO2' ]
cl.OA2pco2_startDatetime = startdate
cl.OA2pco2_endDatetime = enddate
# Mooring OA2 O2
cl.OA2o2_base = cl.dodsBase + cl.oaDir
cl.OA2o2_files = ['OA2_o2_2013.nc']
cl.OA2o2_parms = ['oxygen', 'oxygen_saturation' ]
cl.OA2o2_startDatetime = startdate
cl.OA2o2_endDatetime = enddate
# Mooring OA2 Fluorescence
cl.OA2fl_base = cl.dodsBase + cl.oaDir
cl.OA2fl_files = ['OA2_fl_2013.nc']
cl.OA2fl_parms = [ 'fluor' ]
cl.OA2fl_startDatetime = startdate
cl.OA2fl_endDatetime = enddate
######################################################################################################
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/CN13ID copied to local BOG_Data/CN13ID
######################################################################################################
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data/CN13ID')
cl.subsample_csv_files = [
'STOQS_CN13ID_CARBON_GFF.csv', 'STOQS_CN13ID_CHL_1U.csv', 'STOQS_CN13ID_CHL_5U.csv', 'STOQS_CN13ID_CHLA.csv',
'STOQS_CN13ID_CHL_GFF.csv', 'STOQS_CN13ID_PHAEO_1U.csv', 'STOQS_CN13ID_PHAEO_5U.csv', 'STOQS_CN13ID_PHAEO_GFF.csv',
]
###################################################################################################################
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.loadWFuctd(stride=100)
cl.loadWFpctd(stride=50)
cl.loadL_662(stride=10)
cl.loadDorado(stride=1000)
cl.loadM1(stride=10)
cl.loadOA1ctd(stride=10)
cl.loadOA1met(stride=10)
cl.loadOA1pH(stride=10)
cl.loadOA1pco2(stride=10)
cl.loadOA1fl(stride=10)
cl.loadOA1o2(stride=10)
cl.loadOA2ctd(stride=10)
cl.loadOA2met(stride=10)
cl.loadOA2pH(stride=10)
cl.loadOA2pco2(stride=10)
cl.loadOA2fl(stride=10)
cl.loadOA2o2(stride=10)
cl.loadSubSamples()
elif cl.args.optimal_stride:
cl.loadWFuctd(stride=1)
cl.loadWFpctd(stride=1)
cl.loadL_662(stride=1)
cl.loadDorado(stride=1)
cl.loadM1(stride=1)
cl.loadOA1ctd(stride=1)
cl.loadOA1met(stride=1)
cl.loadOA1pH(stride=1)
cl.loadOA1pco2(stride=1)
cl.loadOA1fl(stride=1)
cl.loadOA1o2(stride=1)
cl.loadOA2ctd(stride=1)
cl.loadOA2met(stride=1)
cl.loadOA2pH(stride=1)
cl.loadOA2pco2(stride=1)
cl.loadOA2fl(stride=1)
cl.loadOA2o2(stride=1)
cl.loadSubSamples()
else:
cl.stride = cl.args.stride
cl.loadWFuctd()
cl.loadWFpctd()
cl.loadL_662()
cl.loadDorado()
cl.loadM1()
cl.loadOA1ctd()
cl.loadOA1met()
cl.loadOA1pH()
cl.loadOA1pco2()
cl.loadOA1fl()
cl.loadOA1o2()
cl.loadOA2ctd()
cl.loadOA2met()
cl.loadOA2pH()
cl.loadOA2pco2()
cl.loadOA2fl()
cl.loadOA2o2()
cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print "All Done."
|
google-code-export/stoqs
|
loaders/CANON/loadCN13ID_october2013.py
|
Python
|
gpl-3.0
| 11,369
|
[
"NetCDF"
] |
a34c76950f8dd3b14218f32914f71027eeb1ab87f3078c75a6780f95d73f9532
|
from math import *
from random import *
import pickle
import pygame
import inspect
__author__ = 'Max Chiang'
from ChiangObjectives import *
import time
import sys
path = sys.path[0]
if not path: path = sys.path[1]
def STAB(PokeType,PokeType2,AttType):
if PokeType==AttType or PokeType2==AttType:
return 1.5
else:
return 1
def BonusCalc(typ,oppType,oppType2):
bonus=1
for i in range(0,len(eval(typ+"Bonus")[typ])):
if oppType==(eval(typ+"Bonus"))[typ][i][0]:
print(oppType)
print((eval(typ+"Bonus"))[typ][i][1],"a")
bonus*=(eval(typ+"Bonus"))[typ][i][1]
print(bonus)
for i in range(0,len(eval(typ+"Bonus")[typ])):
if oppType2!=oppType:
if oppType2==(eval(typ+"Bonus"))[typ][i][0]:
bonus*=(eval(typ+"Bonus"))[typ][i][1]
print(bonus)
effect = "was fine. "
if bonus==0:
effect="had no effect."
if bonus==1/2:
effect="was not very effective."
if bonus==1:
effect="was fine. "
if bonus==2:
effect="was super effective!"
if bonus==4:
effect="was ultra effective!!!!"
return [bonus,"The attack "+effect]
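# Illustrative example (assumed): a Fire attack against a Grass/Bug opponent
# doubles twice, so BonusCalc("Fir", "Gra", "Bug") returns
# [4, "The attack was ultra effective!!!!"] (it also prints debug output).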
FirBonus={"Fir":[["Fir",1/2],["Wat",1/2],["Gra",2],["Ice",2],["Bug",2],["Roc",1/2],["Dra",1/2],["Ste",2]]}
NorBonus={"Nor":[["Roc",1/2],["Gho",0],["Ste",1/2]]}
WatBonus={"Wat":[["Fir",2],["Wat",1/2],["Gra",1/2],["Gro",2],["Roc",2],["Dra",1/2]]}
EleBonus={"Ele":[["Wat",2],["Ele",1/2],["Gra",1/2],["Gro",0],["Fly",2],["Dra",1/2]]}
GraBonus={"Gra":[["Fir",1/2],["Wat",2],["Gra",1/2],["Poi",1/2],["Gro",2],["Fly",1/2],["Bug",1/2],["Roc",2],["Dra",1/2],["Ste",1/2]]}
IceBonus={"Ice":[["Fir",1/2],["Wat",1/2],["Grass",2],["Ice",1/2],["Gro",2],["Fly",2],["Dra",2],["Ste",1/2]]}
FigBonus={"Fig":[["Nor",2],["Ice",2],["Poi",1/2],["Fly",1/2],["Roc",1/2],["Gho",0],["Dar",2],["Ste",2],["Fai",1/2]]}
PoiBonus={"Poi":[["Gra",2],["Poi",1/2],["Gro",1/2],["Roc",1/2],["Gho",1/2],["Ste",0],["Fai",2]]}
GroBonus={"Gro":[["Fir",2],["Ele",2],["Gra",1/2],["Poi",2],["Fly",0],["Bug",1/2],["Roc",2],["Ste",2]]}
FlyBonus={"Fly":[["Ele",1/2],["Gra",2],["Fig",2],["Bug",2],["Roc",1/2],["Ste",1/2]]}
PsyBonus={"Psy":[["Fig",2],["Poi",2],["Psy",1/2],["Dar",0],["Ste",1/2]]}
BugBonus={"Bug":[["Fir",1/2],["Gra",2],["Fig",1/2],["Poi",1/2],["Fly",1/2],["Psy",2],["Gho",1/2],["Dar",2],["Ste",1/2],["Fai",1/2]]}
RocBonus={"Roc":[["Fir",2],["Ice",2],["Fig",1/2],["Fly",2],["Gro",1/2],["Bug",2],["Ste",1/2]]}
GhoBonus={"Gho":[["Nor",0],["Psy",2],["Gho",2],["Dar",2],["Ste",1/2]]}
DraBonus={"Dra":[["Dra",2],["Ste",1/2],["Fai",0]]}
DarBonus={"Dar":[["Fig",1/2],["Psy",2],["Gho",2],["Dar",1/2],["Fai",1/2]]}
SteBonus={"Ste":[["Fir",1/2],["Wat",1/2],["Ele",1/2],["Ice",2],["Roc",2],["Ste",1/2],["Fai",2]]}
FaiBonus={"Fai":[["Fir",1/2],["Fig",1/2],["Poi",1/2],["Dra",2],["Dar",2],["Ste",1/2]]}
def PokeName(code):
return PokeStat[code][0]
def PokeType(code):
if len(PokeStat[code][1])==1:
return PokeStat[code][1]
if len(PokeStat[code][1])==2:
        return PokeStat[code][1][0],PokeStat[code][1][1]
def Pokelevel(name):
for i in range (len(UserPoke)):
# print(UserPoke[i][0][0])
if UserPoke[i][0][0]==name:
return trunc(UserPoke[i][2][0]/500)
# stats list at index 3 appears to hold [attack, defense] (e.g. Charmander [52,43])
def PokeAtkP(code):
    return PokeStat[code][3][0]
def PokeDefP(code):
    return PokeStat[code][3][1]
def PokeXPCalc(level):
xp=level*500
atkGain=level/2
defGain=level/2
return [xp,atkGain,defGain]
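# Illustrative example (assumed): XP scales as level*500 and stat gains as
# level/2, so PokeXPCalc(10) returns [5000, 5.0, 5.0] under Python 3 division.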
#Dictionary of 1st-gen Pokemon, keyed by Pokedex number, with name, type(s),
#Pokedex description, base stats, attacks, catch rate and evolution data.
PokeStat={1:[['Bulbasaur'],["Gra","Poi"],["A strange seed was planted on its back at birth. The plant sprouts and grows with this Pokémon."],[49,49],["Razor Leaf","Vine Whip","Tackle","Double Edge"],[45],["Ivysaur",16]],
2:[["Ivysaur"],["Gra","Poi"],["When the bulb on its back grows large, it appears to lose the ability to stand on its hind legs."],[62,63],["Vine Whip","Razor Leaf","Mega Drain","Poisonpowder"],[45],["Venasaur",32]],
3:[["Venusaur"],["Gra","Poi"],["The plant blooms when it is absorbing solar energy. It stays on the move to seek sunlight."],[82,83],["Razor Leaf","Giga Drain","Solar Beam","Double Edge"],[45],["Final Stage of Evolution"]],
4:[["Charmander"],["Fir"],["Obviously prefers hot places. When it rains, steam is said to spout from the tip of its tail."],[52,43],["Scratch","Ember","Fire Punch","Tackle"],[45],["Charmeleon",16]],
5:[["Charmeleon"],["Fir"],["When it swings its burning tail, it elevates the temperature to unbearably high levels."],[64,58],["Fire Fang","Flamethrower","Slash","Double Edge"],[45],["Charizard",36]],
6:[["Charizard"],["Fir","Fly"],["It spits fire that is hot enough to melt boulders. Known to cause forest fires unintentionally."],[84,78],["Fire Blast", "Wing Attack", "Flamethrower","Double Edge"],[45],["Final Stage of Evolution"]],
7:[["Squirtle"],["Wat"],["After birth, its back swells and hardens into a shell. Powerful sprays foam from its mouth."],[48,65],["Tackle", "Bubble", "Water Gun","Bite"],[45],["Wartortle",16]],
8:[["Wartortle"],["Wat"],["Often hides in water to stalk unwary prey. For swimming fast, it moves its ears to maintain balance."],[63,80],["Slam", "Water Gun", "Bubblebeam","Crunch"],[45],["Blastoise",36]],
9:[["Blastoise"],["Wat"],["A brutal Pokémon with pressurized water jets on its shell. They are used for high speed tackles."],[83,10],["Hydro Pump","Ice Beam","Crunch","Bubblebeam"],[45],["Final Stage of Evolution"]],
10:[["Caterpie"],["Bug"],["Its short feet are tipped with suction pads that enable it to tirelessly climb slopes and walls."],[30,35],["Tackle","String Shot","Splash","Fury Cutter"],[255],["Metapod",7]],
11:[["Metapod"],["Bug"],["This Pokémon is vulnerable to attack while its shell is soft, exposing its weak and tender body."],[25,50],["Tackle","String Shot","Harden","Splash"],[120],["Butterfree",10]],
12:[["Butterfree"],["Bug","Fly"],["In battle, it flaps its wings at high speed, releasing highly toxic dust into the air."],[45,50],["Psybeam","Gust","Confusion","Silver Wind"],[45],["Final Stage of Evolution"]],
13:[["Weedle"],["Bug","Poi"],["Often found in forests, eating leaves. It has a sharp venomous stinger on its head."],[35,30],["Tackle","String Shot","Splash","Poison Sting"],[255],["Kakuna",7]],
14:[["Kakuna"],["Bug","Poi"],["Almost incapable of moving, this Pokémon can only harden its shell to protect itself from predators."],[25,50],["Tackle","String Shot","Poison Sting","Harden"],[120],["Beedrill",10]],
15:[["Beedrill"],["Bug","Poi"],["Flies at high speed and attacks using its large venomous stingers on its forelegs and tail."],[90,40],["Poison Jab","Silver Wind","Fury Attack","Headbutt"],[45],["Final Stage of Evolution"]],
16:[["Pidgey"],["Fly","Nor"],["A common sight in forests and woods. It flaps its wings at ground level to kick up blinding sand."],[45,40],["Tackle","Gust","Quick Attack","Wing Attack"],[255],["Pidgeotto",18]],
17:[["Pidgeotto"],["Fly","Nor"],["Very protective of its sprawling territory, this Pokémon will fiercely peck at any intruder."],[60,55],["Wing Attack","Quick Attack","Peck","Secret Power"],[120],["Pidgeot",36]],
18:[["Pidgeot"],["Fly","Nor"],["When hunting, it skims the surface of water at high speed to pick off unwary prey such as Magikarp."],[80,75],["Drill Peck","Wing Attack","Secret Power","Hyper Beam"],[45],["Final Stage of Evolution"]],
19:[["Rattata"],["Nor"],["Bites anything when it attacks. Small and very quick, it is a common sight in many places."],[56,35],["Bite","Hyper Fang","Tackle","Quick Attack"],[255],["Raticate",20]],
20:[["Raticate"],["Nor"],["It uses its whiskers to maintain its balance and will slow down if they are cut off."],[81,60],["Super Fang","Hyper Fang","Secret Power","Quick Attack"],[127],["Final Stage of Evolution"]],
21:[["Spearow"],["Nor","Fly"],["Eats bugs in grassy areas. It has to flap its short wings at high speed to stay airborne."],[60,30],["Peck","Wing Attack","Quick Attack","Aerial Ace"],[255],["Fearow",20]],
22:[["Fearow"],["Nor","Fly"],["With its huge and magnificent wings, it can keep aloft without ever having to land for rest."],[90,65],["Drill Peck","Pluck","Quick Attack"],[90],["Final Stage of Evolution"]],
23:[["Ekans"],["Poi"],["Moves silently and stealthily. Eats the eggs of birds, such as Pidgey and Spearow, whole."],[60,44],["Wrap","Poison Sting","Bite","Acid"],[255],["Arbok",22]],
24:[["Arbok"],["Poi"],["It is rumored that the ferocious warning markings on its belly differ from area to area."],[85,69],["Poison Fang","Poison Sting","Wrap","Mud Bomb"],[90],["Final Stage of Evolution"]],
25:[["Pikachu"],["Ele"],["When several of these Pokémon gather, their electricity could build and cause lightning storms."],[55,40],["Thunder Shock","Shock Wave","Quick Attack","Slam"],[190],["Raichu","Thunder Stone"]],
26:[["Raichu"],["Ele"],["Its long tail serves as a ground to protect itself from its own high voltage power."],[90,55],["Thunderbolt","Shock Wave","Slam","Facade"],[75],["Final Stage of Evolution"]],
27:[["Sandshrew"],["Gro"],["Burrows deep underground in arid locations far from water. It only emerges to hunt for food."],[75,85],["Dig","Scratch","Mud Slap","Tackle"],[255],[90],["Sandslash",22]],
28:[["Sandslash"],["Gro"],["Curls up into a spiny ball when threatened. It can roll while curled up to attack or escape."],[100,110],["Dig","Slash","Facade","Quick Attack"],["Final Stage of Evolution"]],
29:[["NidoranF"],["Poi","Gro"],["Although small, its venomous barbs render this Pokémon dangerous. The female has smaller horns."],[47,52],["Poison Sting","Quick Attack","Double Kick","Fury Swipes"],[235],["Nidorina",16]],
30:[["Nidorina"],["Poi","Gro"],["The female's horn develops slowly. Prefers physical attacks such as clawing and biting."],[62,67],["Poison Fang","Poison Sting","Double Kick","Horn Attack"],[120],["Nidoqueen","Moon Stone"]],
31:[["Nidoqueen"],["Poi","Gro"],["Its hard scales provide strong protection. It uses its hefty bulk to execute powerful moves."],[92,87],["Double Kick","Earthquake","Poison Sting","Stomp"],[45],["Final Stage of Evolution","Moon Stone"]],
32:[["NidoranM"],["Poi","Gro"],["Stiffens its ears to sense danger. The larger its horns, the more powerful its secreted venom."],[57,40],["Poison Sting","Horn Attack","Tackle","Dig"],[235],["Nidorino",16]],
33:[["Nidorino"],["Poi","Gro"],["An aggressive Pokémon that is quick to attack. The horn on its head secretes a powerful venom"],[72,57],["Double Kick","Horn Attack","Dig","Take Down"],[120],["Nidoking","Moon Stone"]],
34:[["Nidoking"],["Poi","Gro"],["It uses its powerful tail in battle to smash, constrict, then break the prey's bones."],[102,77],["Earthquake","Megahorn","Horn Attack","Mega Kick"],[45],["Final Stage of Evolution"]],
35:[["Clefairy"],["Fai"],["Its magical and cute appeal has many admirers. It is rare and found only in certain areas."],[45,48],["Double Slap","Pound","Quick Attack","Sing"],[150],["Clefable","Moon Stone"]],
36:[["Clefable"],["Fai"],["A timid fairy Pokémon that is rarely seen. It will run and hide the moment it senses people."],[70,73],["Double Slap","Secret Power","Facade","Sing"],[45],["Final Stage of Evolution"]],
37:[["Vulpix"],["Fir"],["At the time of birth, it has just one tail. The tail splits from its tip as it grows older."],[41,40],["Ember","Confuse Ray","Quick Attack","Leer"],[190],["Ninetales","Fire Stone"]],
38:[["Ninetales"],["Fir"],["Very smart and very vengeful. Grabbing one of its many tails could result in a 1000-year curse."],[76,75],["Flamethrower","Confuse Ray","Screech","Fire Spin"],[75],["Final Stage of Evolution"]],
39:[["Jigglypuff"],["Nor","Fai"],["When its huge eyes light up, it sings a mysteriously soothing melody that lulls its enemies to sleep."],[45,20],["Sing","Pound","Double Slap","Secret Power"],[170],["Wigglytuff","Moon Stone"]],
40:[["Wigglytuff"],["Nor","Fai"],["The body is soft and rubbery. When angered, it will suck in air and inflate itself to an enormous size."],[70,45],["Sing","Double Slap","Facade","Tackle"],[50],["Final Stage of Evolution"]],
41:[["Zubat"],["Poi","Fly"],["Forms colonies in perpetually dark places. Uses ultrasonic waves to identify and approach targets."],[45,35],["Confuse Ray","Wing Attack","Leech Life","Tackle"],[255],["Golbat",22]],
42:[["Golbat"],["Poi","Fly"],["Once it strikes, it will not stop draining energy from the victim even if it gets too heavy to fly."],[80,70],["Confuse Ray","Air Cutter","Wing Attack","Poison Fang"],[90],["Final Stage of Evolution"]],
43:[["Oddish"],["Gra","Poi"],["During the day, it keeps its face buried in the ground. At night, it wanders around sowing its seeds."],[50,55],["Sleep Powder","Poisonpowder","Absorb","Pound"],[255],["Gloom",21]],
44:[["Gloom"],["Gra","Poi"],["The fluid that oozes from its mouth isn't drool. It's a nectar that is used to attract prey."],[65,70],["Sleep Powder","Mega Drain","Absorb","Slam"],[120],["Vineplume","Leaf Stone"]],
45:[["Vineplume"],["Gra","Poi"],["The larger its petals, the more toxic pollen it contains. Its big head is heavy and hard to hold up."],[80,85],["Giga Drain","Acid","Slam","Sleep Powder"],[45],["Final Stage of Evolution"]],
46:[["Paras"],["Gra","Bug"],["Burrows to suck tree roots. The mushrooms on its back grow by drawing nutrients from the bug host."],[70,55],["Scratch","Fury Cutter","Tackle","Leech Life"],[190],["Parasect",24]],
47:[["Parasect"],["Gra","Bug"],["A host-parasite pair in which the parasite mushroom has taken over the host bug. Prefers damp places."],[95,80],["Fury Cutter","Signal Beam","Cut","Slash"],[75],["Final Stage of Evolution"]],
48:[["Venonat"],["Poi","Bug"],["Lives in the shadows of tall trees where it eats bugs. It is attracted by light at night"],[55,50],["Poisonpowder","Tackle","Acid","Confusion"],[190],["Venomoth",31]],
49:[["Venomoth"],["Poi","Bug"],["The dust-like scales covering its wings are color coded to indicate the kinds of poison it has."],[65,60],["Wing Attack","Silver Wind","Acid","Signal Beam"],[75],["Final Stage of Evolution"]],
50:[["Diglett"],["Gro"],["Lives about one yard underground where it feeds on plant roots. It sometimes appears above ground."],[55,25],["Dig","Scratch","Leer","Quick Attack"],[255],["Dugtrio",26]],
51:[["Dugtrio"],["Gro"],["A team of Diglett triplets. It triggers huge earthquakes by burrowing 60 miles underground."],[80,50],["Dig","Slash","Slam","Earth Power"],[50],["Final Stage of Evolution"]],
52:[["Meowth"],["Nor"],["Adores circular objects. Wanders the street on a nightly basis to look for dropped loose change."],[45,35],["Scratch","Quick Attack","Growl","Pay Day"],[255],["Persian",28]],
53:[["Persian"],["Nor"],["Although its fur has many admirers, it is tough to raise as a pet because of its fickle meanness."],[70,60],["Slash","Secret Power","Pay Day","Screech"],[90],["Final Stage of Evolution"]],
54:[["Psyduck"],["Wat"],["While lulling its enemies with its vacant look, this wily Pokémon will use psychokinetic powers."],[52,48],["Confusion","Water Gun","Scratch","Confuse Ray"],["Golduck",33]],
55:[["Golduck"],["Wat"],["Often seen swimming elegantly by lakeshores. It is often mistaken for the Japanese monster Kappa."],[82,78],["Confusion","Bubblebeam","Slash","Brine"],[190],[75],["Final Stage of Evolution"]],
56:[["Mankey"],["Fig"],["Extremely quick to anger. It could be docile one moment then thrashing away the next instant."],[80,35],["Low Kick","Scratch","Thrash","Karate Chop"],[190],["Primeape",28]],
57:[["Primeape"],["Fig"],["Always furious and tenacious to boot. It will not abandon chasing its quarry until it is caught."],[105,60],["Cross Chop","Brick Break","Scratch","Leer"],[75],["Final Stage of Evolution"]],
58:[["Growlithe"],["Fir"],["Very protective of its territory. It will bark and bite to repel intruders from its space."],[70,45],["Ember","Quick Attack","Flame Wheel","Growl"],[190],["Arcanine","Fire Stone"]],
59:[["Arcanine"],["Fir"],["A Pokémon that has been admired since the past for its beauty. It runs agilely as if on wings."],[110,80],["Flame Wheel","Flamethrower","Extremespeed","Screech"],[75],["Final Stage of Evolution"]],
60:[["Poliwag"],["Wat"],["Its newly grown legs prevent it from walking well. It appears to prefer swimming over walking."],[50,40],["Bubble","Water Gun","Hypnosis","Pound"],[255],["Poliwhirl",25]],
61:[["Poliwhirl"],["Wat"],["It can live in or out of water. When out of water, it constantly sweats to keep its body slimy."],[65,65],["Bubblebeam","Water Gun","Hypnosis","Slam"],[120],["Poliwrath","Water Stone"]],
62:[["Poliwrath"],["Wat","Fig"],["A graradept at both the front crawl and breaststroke. Easily overtakes the best human swimmers."],[95,95],["Surf","Mega Punch","Water Gun","Hypnosis"],[45],["Final Stage of Evolution"]],
63:[["Abra"],["Psy"],["Using its ability to read minds, it will sense impending danger and teleport to safety."],[20,15],["Confusion","Teleport","Scratch","Psywave"],[200],["Kadabra",16]],
64:[["Kadabra"],["Psy"],["It emits special alpha waves from its body that induce headaches just by being close by."],[35,30],["Confusion","Psybeam","Psywave","Thunder Shock"],[100],["Alakazam",32]],
65:[["Alakazam"],["Psy"],["Its brain can outperform a super-computer. Its intelligence quotient is said to be 5,000."],[50,45],["Psychic","Psybeam","Thunderbolt","Calm Mind"],[50],["Final Stage of Evolution"]],
66:[["Machop"],["Fig"],["oves to build its muscles. It trains in all styles of martial arts to become even stronger."],[80,50],["Karate Chop","Low Kick","Comet Punch","Body Slam"],[180],["Machoke",28]],
67:[["Machoke"],["Fig"],["Its muscular body is so powerful, it must wear a power-save belt to be able to regulate its motions."],[100,70],["Karate Chop","Body Slam","Mach Punch","Fire Punch"],[90],["Machamp",40]],
68:[["Machamp"],["Fig"],["Using its heavy muscles, it throws powerful punches that can send the victim clear over the horizon."],[130,80],["Cross Chop","Seismic Toss","Fire Punch","Thunder Punch"],[45],["Final Stage of Evolution"]],
69:[["Bellsprout"],["Gra","Poi"],["A carnivorous Pokémon that traps and eats bugs. It appears to use its root feet to replenish moisture."],[75,35],["Vine Whip","Absorb","Wrap","Bind"],[255],["Weepinbell",21]],
70:[["Weepinbell"],["Gra","Poi"],["It spits out poisonpowder to immobilize the enemy and then finishes it with a spray of acid."],[90,50],["Vine Whip","Razor Leaf","Sleep Powder","Acid"],[120],["Victreebel","Leaf Stone"]],
71:[["Victreebel"],["Gra","Poi"],["Said to live in huge colonies deep in jungles, although no one has ever returned from there."],[105,65],["Razor Leaf","Acid","Sludge","Solar Beam"],[45],["Final Stage of Evolution"]],
72:[["Tentacool"],["Wat","Poi"],["Drifts in shallow seas. Anglers who hook them by accident are often punished by its stinging acid."],[40,35],["Acid","Water Gun","Constrict","Wrap"],[190],["Tentacruel",30]],
73:[["Tentacruel"],["Wat","Poi"],["The tentacles are normally kept short. On hunts, they are extended to ensnare and immobilize prey."],[70,65],["Acid","Bubblebeam","Sludge","Constrict"],[60],["Final Stage of Evolution"]],
74:[["Geodude"],["Roc","Gro"],["Found in fields and mountains. Mistaking them for boulders, people often step or trip on them."],[80,100],["Rock Throw","Magnitude","Tackle","Rollout"],[255],["Graveler",25]],
75:[["Graveler"],["Roc","Gro"],["Rolls down slopes to move. It rolls over any obstacle without slowing or changing its direction."],[95,115],["Rock Throw","Magnitude","Slam","Harden"],[120],["Golem",35]],
76:[["Golem"],["Roc","Gro"],["Its boulder-like body is extremely hard. It can easily withstand dynamite blasts without taking damage."],[120,130],["Earth Power","Rock Slide","Dig","Rollout"],[45],["Final Stage of Evolution"]],
77:[["Ponyta"],["Fir"],["Its hooves are 10 times harder than diamonds. It can trample anything completely flat in little time."],[85,55],["Ember","Fire Spin","Stomp","Quick Attack"],[190],["Rapidash",40]],
78:[["Rapidash"],["Fir"],["Very competitive, this Pokémon will chase anything that moves fast in the hopes of racing it."],[100,70],["Stomp","Fire Spin","Flame Wheel","Extremespeed"],[60],["Final Stage of Evolution"]],
79:[["Slowpoke"],["Wat","Psy"],["Incredibly slow and dopey. It takes 5 seconds for it to feel pain when under attack."],[65,65],["Confusion","Water Gun","Tail Whip","Bubble"],[190],["Slowbro",37]],
80:[["Slowbro"],["Wat","Psy"],["The Shellder that latches onto Slowpoke's tail is said to feed on the host's leftover scraps."],[75,110],["Confusion","Water Gun","Bubble","Confuse Ray"],[75],["Final Stage of Evolution"]],
81:[["Magnemite"],["Ele","Ste"],["Uses antigravity to stay suspended. Appears without warning and uses Thunder Wave and similar moves."],[35,70],["Thunder Shock","Tackle","Thunder Wave","Sonicboom"],[190],["Magneton",30]],
82:[["Magneton"],["Ele","Ste"],["Formed by several Magnemite linked together. They frequently appear when sunspots flare up."],[60,95],["Thunderbolt","Sonicboom","Mirror Shot","Magnet Bomb"],[60],["Final Stage of Evolution"]],
83:[["Farfetch'd"],["Fly","Nor"],["The spring of green onions it holds is its weapon. It is used much like a metal sword."],[65,55],["Wing Attack","Quick Attack","Cut","False Swipe"],[45],["Final Stage of Evolution"]],
84:[["Doduo"],["Fly","Nor"],["A bird that makes up for its poor flying with its fast foot speed. Leaves giant footprints."],[110,70],["Peck","Fury Attack","Stomp","Quick Attack"],[190],["Dodrio",31]],
85:[["Dodrio"],["Fly","Nor"],["It uses three brains to execute complex plans. While two heads sleep, one head is said to stay awake."],[110,70],['Drill Peck','Peck','Extremespeed','Stomp'],[45],["Final Stage of Evolution"]],
86:[["Seel"],["Wat","Ice"],["The protruding horn on its head is very hard. It is used for bashing through thick ice."],[45,55],["Aurora Beam",'Water Gun','Body Slam','Powder Snow'],[190],["Dewgong",34]],
87:[["Dewgong"],["Wat","Ice"],["Stores thermal energy in its body. Swims at a steady 8 knots even in intensely cold waters."],[70,80],["Surf","Ice Beam","Body Slam","Aqua Tail"],[75],["Final Stage of Evolution"]],
88:[["Grimer"],["Poi"],["Appears in filthy areas. Thrives by sucking up polluted sludge that is pumped out of factories."],[80,50],["Tackle","Poison Gas","Mud Slap","Sludge"],[190],["Muk",38]],
89:[["Muk"],["Poi"],["Thickly covered with a filthy, vile sludge. It is so toxic, even its footprints contain poison."],[105,75],["Sludge","Sludge Bomb","Pound","Toxic"],[75],["Final Stage of Evolution"]],
90:[["Shellder"],["Wat","Ice"],["Its hard shell repels any kind of attack. It is vulnerable only when its shell is open."],[65,100],["Aurora Beam","Water Gun","Splash","Tackle"],[190],["Cloyster","Water Stone"]],
91:[["Cloyster"],["Wat","Ice"],["When attacked, it launches its horns in quick volleys. Its innards have never been seen."],[95,180],["Water Gun","Aurora Beam","Ice Beam","Spike Cannon"],[60],["Final Stage of Evolution"]],
92:[["Gastly"],["Gho","Poi"],["Almost invisible, this gaseous Pokémon cloaks the target and puts it to sleep without notice."],[35,30],["Lick","Hypnosis","Night Shade","Confuse Ray"],[190],["Haunter",25]],
93:[["Haunter"],["Gho","Poi"],["Because of its ability to slip through block walls, it is said to be from another dimension."],[50,45],["Hypnosis","Poison Gas","Shadow Ball","Sucker Punch"],[90],["Gengar",40]],
94:[["Gengar"],["Gho","Poi"],["Under a full moon, this Pokémon likes to mimic the shadows of people and laugh at their fright."],[65,60],["Hypnosis","Sludge Bomb","Shadow Ball","Sucker Punch"],[45],["Final Stage of Evolution"]],
95:[["Onix"],["Roc","Gro"],["As it grows, the stone portions of its body harden to become similar to a diamond, but colored black."],[45,160],["Bind","Rock Throw","Slam","Rock Tomb"],[45],["Final Stage of Evolution"]],
96:[["Drowzee"],["Psy"],["Puts enemies to sleep then eats their dreams. Occasionally gets sick from eating bad dreams."],[48,45],["Confusion","Hypnosis","Pound","Body Slam"],[190],["Hypno", 26]],
97:[["Hypno"],["Psy"],["When it locks eyes with an enemy, it will use a mix of PSI moves such as Hypnosis and Confusion."],[73,70],["Psychic","Headbutt","Zen Headbutt","Psybeam"],[75],["Final Stage of Evolution"]],
98:[["Krabby"],["Wat"],["Its pincers are not only powerful weapons, they are used for balance when walking sideways."],[105,90],["Water Gun","Vicegrip","Cut","Bubble"],[225],["Kingler",28]],
99:[["Kingler"],["Wat"],["The large pincer has 10,000-horsepower crushing force. However, its huge size makes it unwieldy to use."],[130,115],["Bubblebeam","Crabhammer","Vicegrip","Metal Claw"],[60],["Final Stage of Evolution"]],
100:[["Voltorb"],["Ele"],["Usually found in power plants. Easily mistaken for a Poké Ball, it has zapped many people."],[50,70],["Sonicboom","Explosion","Spark","Rollout"],[190],["Electrode",30]],
101:[["Electrode"],["Ele"],["It stores electric energy under very high pressure. It often explodes with little or no provocation."],[50,70],["Explosion","Spark","Thunderbolt","Charge Beam"],[60],["Final Stage of Evolution"]],
102:[["Exeggcute"],["Gra"],["It is often mistaken for eggs. When disturbed, they gather quickly and attack in swarms."],[40,80],["Uproar","Bullet Seed","Sleep Powder","Confusion"],[90],["Exeggutor","Leaf Stone"]],
103:[["Exeggutor"],["Gra"],["Legend has it that on rare occasions, one of its heads will drop off and continue on as an Exeggcute."],[95,85],["Hypnosis","Confusion","Psychic","Razor Leaf"],[45],["Final Stage of Evolution"]],
104:[["Cubone"],["Gro"],["Because it never removes its skull helmet, no one has ever seen this Pokémon's real face."],[50,95],["Headbutt","Bone Club","Bonemerang","False Swipe"],[190],["Marowak",28]],
105:[["Marowak"],["Gro"],["The bone it holds is its key weapon. It throws the bone skillfully like a boomerang to KO targets."],[80,110],["Bonemerang","Headbutt","Thrash","Dig"],[75],["Final Stage of Evolution"]],
106:[["Hitmonlee"],["Fig"],["When in a hurry, its legs lengthen progressively. It runs smoothly with extra long, loping strides."],[120,53],["Double Kick","Rolling Kick","Jump Kick","Brick Break"],[45],["Final Stage of Evolution"]],
107:[["Hitmonchan"],["Fig"],["While apparently doing nothing, it fires punches in lightning fast volleys that are impossible to see."],[105,79],["Ice Punch","Mach Punch","Thunder Punch","Fire Punch"],[45],["Final Stage of Evolution"]],
108:[["Lickitung"],["Nor"],["Its tongue can be extended like a chameleon's. It leaves a tingling sensation when it licks enemies."],[55,75],["Rollout","Lick","Secret Power","Wrap"],[45],["Final Stage of Evolution"]],
109:[["Koffing"],["Poi"],["Because it stores several kinds of toxic gases in its body, it is prone to exploding without warning."],[65,95],["Poison Gas","Smog","Sludge","Assurance"],[190],["Weezing",34]],
110:[["Weezing"],["Poi"],["Where two kinds of poison gases meet, two Koffings can fuse into a Weezing over many years."],[90,120],["Sludge","Poison Gas","Smog","Smokescreen"],[60],["Final Stage of Evolution"]],
111:[["Rhyhorn"],["Gro","Roc"],["Its massive bones are 1000 times harder than human bones. It can easily knock a trailer flying."],[85,90],["Horn Attack","Rock Throw","Take Down","Rock Blast"],[60],["Rhydon",42]],
112:[["Rhydon"],["Gro","Roc"],["Protected by an armor-like hide, it is capable of living in molten lava of 3,600 degrees."],[130,120],["Stone Edge","Megahorn","Rock Blast","Stomp"],[120],["Final Stage of Evolution"]],
113:[["Chansey"],["Nor"],["A rare and elusive Pokémon that is said to bring happiness to those who manage to get it."],[5,5],["Pound","Softboiled","Double Slap","Sing"],["Final Stage of Evolution"]],
114:[["Tangla"],["Gra"],["The whole body is swathed with wide vines that are similar to seaweed. The vines sway as it walks."],[55,115],["Ancient Power","Vine Whip","Absorb","Pound"],[45],["Final Stage of Evolution"]],
115:[["Kangaskhan"],["Nor"],["The infant rarely ventures out of its mother's protective pouch until it is 3 years old."],[95,80],["Comet Punch","Fake Out","Bite","Mega Punch"],[45],["Final Stage of Evolution"]],
116:[["Horsea"],["Wat"],["Known to shoot down flying bugs with precision blasts of ink from the surface of the water."],[40,70],["Water Gun","Bubble","Pound","Twister"],[225],["Horsea",32]],
117:[["Seadra"],["Wat"],["Capable of swimming backwards by rapidly flapping its wing-like pectoral fins and stout tail."],[65,95],["Bubblebeam","Dragon Rage","Leer","Water Gun"],[75],["Final Stage of Evolution"]],
118:[["Goldeen"],["Wat"],["Its tail fin billows like an elegant ballroom dress, giving it the nickname of the Water Queen."],[67,60],["Bubble","Pound","Horn Attack","Water Gun"],[225],["Seeking",33]],
119:[["Seaking"],["Wat"],["In the autumn spawning season, they can be seen swimming powerfully up rivers and creeks."],[92,65],["Drill Peck","Horn Attack","Surf","Bubble"],[85,90],["Final Stage of Evolution"]],
120:[["Staryu"],["Wat","Psy"],["An enigmatic Pokémon that can regenerate any appendage it loses in battle."],[45,55],["Water Gun","Rapid Spin","Bubble","Slam",],[225],["Starmie","Water Stone"]],
121:[["Starmie"],["Wat","Psy"],["Its central core glows with the seven colors of the rainbow. Some people value the core as a gem."],[75,85],["Rapid Spin","Surf","Aurora Beam","Confusion"],[60],["Final Stage of Evolution"]],
122:[["Mr.Mime"],["Fai","Psy"],["If interrupted while it is miming, it will slap around the offender with its broad hands."],[45,65],["Confusion","Pound","Double Slap","Psychic"],[45],["Final Stage of Evolution"]],
123:[["Scyther"],["Bug","Fly"],["With ninja-like agility and speed, it can create the illusion that there is more than one."],[45,65],["Slash","Fury Cutter","False Swipe","Wing Attack"],[45],["Final Stage of Evolution"]],
124:[["Jynx"],["Psy","Ice"],["It seductively wiggles its hips as it walks. It can cause people to dance in unison with it."],[50,35],["Confusion","Ice Punch","Lovely Kiss","Powder Snow"],[45],["Final Stage of Evolution"]],
125:[["Elecabuzz"],["Ele"],["Normally found near power plants, they can wander away and cause major blackouts in cities."],[83,57],["Thunder Punch","Quick Attack","Take Down","Shock Wave"],[45],["Final Stage of Evolution"]],
126:[["Magmar"],["Fir"],["Its body always burns with an orange glow that enables it to hide perfectly among flames."],[95,57],["Fire Punch","Smog","Ember","Mega Punch"],[45],["Final Stage of Evolution"]],
127:[["Pinsir"],["Bug"],["If it fails to crush the victim in its pincers, it will swing it around and toss it hard."],[120,100],["Vicegrip","Bind","Brick Break","Cut"],[45],["Final Stage of Evolution"]],
128:[["Tauros"],["Nor"],["When it targets an enemy, it charges furiously while whipping its body with its long tails."],[100,95],["Stomp","Take Down","Mud Shot","Horn Attack"],[45],["Final Stage of Evolution"]],
129:[["Magikarp"],["Wat"],["In the distant past, it was somewhat stronger than the horribly weak descendants that exist today."],[10,55],["Splash","Tackle","Splash","Splash"],[255],["Gyarados",20]],
130:[["Gyarados"],["Wat","Fly"],["Rarely seen in the wild. Huge and vicious, it is capable of destroying entire cities in a rage."],[155,109],["Waterfall","Thrash",'Aerial Ace',"Water Pulse"],[45],["Final Stage of Evolution"]],
131:[["Lapras"],["Wat","Ice"],["A Pokémon that has been overhunted almost to extinction. It can ferry people across the water."],[85,80],["Ice Beam","Surf","Water Gun","Confuse Ray"],["Final Stage of Evolution"]],
132:[["Ditto"],["Nor"],["Capable of copying an enemy's genetic code to instantly transform itself into a duplicate of the enemy."],[48,48],["Pound","Tackle","Slam","Secret Power"],["Final Stage of Evolution"]],
133:[["Eevee"],["Nor"],["Its genetic code is irregular. It may mutate if it is exposed to radiation from element stones."],[55,50],["Quick Attack","Secret Power","Iron Tail","Tackle"],[45],[["Vaporeon","Water Stone"],["Flareon","Fire Stone"],["Jolteon","Thunder Stone"]]],
134:[["Vaporeon"],["Wat"],["Lives close to water. Its long tail is ridged with a fin which is often mistaken for a mermaid's."],[65,60],["Aurora Beam","Water Gun","Aqua Tail","Bubblebeam"],[45],["Final Stage of Evolution"]],
135:[["Jolteon"],["Ele"],["It accumulates negative ions in the atmosphere to blast out 10000-volt lightning bolts."],[65,60],["Thunder Shock","Quick Attack","Shock Wave","Thunder Fang"],[45],["Final Stage of Evolution"]],
136:[["Flareon"],["Fir"],["When storing thermal energy in its body, its temperature could soar to over 1,600 degrees."],[130,60],["Ember","Quick Attack","Flame Wheel","Fire Spin"],[45],["Final Stage of Evolution"]],
137:[["Porygon"],["Nor"],["A Pokémon that consists entirely of programming code. Capable of moving freely in cyberspace."],[60,70],["Confusion","Signal Beam","Tackle","Rapid Spin"],[45],["Final Stage of Evolution"]],
138:[["Omanyte"],["Roc","Wat"],["Although long extinct, in rare cases, it can be genetically resurrected from fossils."],[40,100],["Rock Blast","Water Gun","Rock Throw","Bubble"],[45],["Omastar",40]],
139:[["Omastar"],["Roc","Wat"],["A prehistoric Pokémon that died out when its heavy shell made it impossible to catch prey."],[60,125],["Brine","Mud Shot","Rock Blast","Ancient Power"],[45],["Final Stage of Evolution"]],
140:[["Kabuto"],["Roc","Wat"],["A Pokémon that was resurrected from a fossil found in what was once the ocean floor eons ago."],[80,90],["Rock Blast","Water Gun","Scratch","Leech Life"],[45],["Kabutops",40]],
141:[["Kabutops"],["Roc","Wat"],["Its sleek shape is perfect for swimming. It slashes prey with its claws and drains the body fluids."],[115,105],["Slash","Rock Slide","Water Gun","False Swipe"],[45],["Final Stage of Evolution"]],
142:[["Aerodactyl"],["Roc","Fly"],["A ferocious, prehistoric Pokémon that goes for the enemy's throat with its serrated saw-like fangs."],[105,65],["Rock Slide","Air Cutter","Aerial Ace","Crunch"],[45],["Final Stage of Evolution"]],
143:[["Snorlax"],["Nor"],["Very lazy. Just eats and sleeps. As its rotund bulk builds, it becomes steadily more slothful."],[110,65],["Rest","Body Slam","Rollout","Crunch"],[25],["Final Stage of Evolution"]],
144:[["Articuno"],["Fly","Ice"],["A legendary bird Pokémon said to appear to doomed people who are lost in icy mountains."],[85,100],["Ice Beam","Sky Attack","Blizzard","Confuse Ray"],[3],["Final Stage of Evolution"]],
145:[["Zapdos"],["Fly","Ele"],["A legendary bird Pokémon that is said to appear from clouds while dropping enormous lightning bolts."],[90,85],["Zap Cannon","Thunderbolt","Drill Peck","Discharge"][3],["Final Stage of Evolution"]],
146:[["Moltres"],["Fly","Fir"],["Known as the legendary bird of fire. Every flap of its wings creates a giant dazzle of flashing flames."],[100,90],["Flamethrower","Sky Attack","Solar Beam","Heat Wave"],[3],["Final Stage of Evolution"]],
147:[["Dratini"],["Dra"],["Long considered a mythical Pokémon until recently, when a small colony was found living underwater."],[64,45],["Wrap","Thunder Wave","Twister","Dragon Rage"],[45],["Dragonair",30]],
148:[["Dragonair"],["Dra"],["A mystical Pokémon that exudes a gentle aura. Has the ability to change climate conditions."],[84,65],["Dragon Rage","Aqua Tail","Twister","Constrict"],[45],["Dragonite",55]],
149:[["Dragonite"],["Dra","Fly"],["An extremely rarely seen marine Pokémon. Its intelligence is said to match that of humans."],[134,95],["Dragon Rage","Dragon Claw","Hyper Beam","Aqua Tail"],[45],["Final Stage of Evolution"]],
150:[["Mewtwo"],["Psy"],["It was created by a scientist after years of horrific gene splicing and DNA engineering experiments."],[110,90],["Psychic","Swift","Psycho Cut","Aura Sphere"],["Final Stage of Evolution"]],
151:[["Mew"],["Psy"],["So rare that it is still said to be a mirage by many experts. Only a few people have seen it worldwide."],[100,100],["Psychic","Flamethrower","Thunder","Ice Beam"],["Final Stage of Evolution"]]}
def getPokeLore(pokeID):
return PokeStat[pokeID][2][0]
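#Illustrative accessors (added sketch, not part of the original API): each
#PokeStat row is laid out as [[name], [types], [dex text], [SpAtk, SpDef],
#[moves], [catch rate], [evolution info]], so lookups mirror getPokeLore above.
def getPokeName(pokeID):
    return PokeStat[pokeID][0][0]
def getPokeTypes(pokeID):
    return PokeStat[pokeID][1]
#e.g. getPokeName(150) -> "Mewtwo", getPokeTypes(150) -> ["Psy"]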
#Dictionary of attacks: [attack type], [[power], [accuracy], [PP]], [bonus effect]
#PP is the number of times the attack can be used.
attack={
"Bug Bite":[["Bug"],[[60],[100],[20]],[None]],
"Hard Roller":[["Bug"],[[55],[95],[25]],[None]],
"X-Scissor":[["Bug"],[[80],[100],[15]],[None]],
"Leech Life ":[["Bug"],[[20],[100],[15]],[None]],
"Pin Missile":[["Bug"],[[[25],[85],[20]]],[None]],
"Fury Cutter":[["Bug"],[[40],[95],[20]],[None]],
"String Shot":[["Bug"],[[25],[95],[40]],[None]],
"Megahorn":[["Bug"],[[120],[85],[10]],[None]],
"Silver Wind":[["Bug"],[[60],[100],[5]],[None]],
"Signal Beam":[["Bug"],[[75],[100],[15]],["Confuse",1/10]],
"Bug Buzz":[["Bug"],[[90],[100],[10]],[None]],
"Crunch":[["Dar"],[[80],[100],[15]],[None]],
"Dark Pulse":[["Dar"],[[60],[100],[20]],[None]],
"Beat Up":[["Dar"],[[50],[90],[20]],[None]],
"Fake Out":[["Dar"],[[40],[100],[10]],[None]],
"Payback":[["Dar"],[[50],[100],[10]],[None]],
"Persuit":[["Dar"],[[60],[100],[20]],[None]],
"Sucker Punch":[["Dar"],[[80],[100],[5]],[None]],
"Assurance":[["Dar"],[[60],[100],[10]],[None]],
"Bite":[["Dar"],[[60],[100],[25]],[None]],
"Draco Meteor":[["Dra"],[[130],[90],[5]],[None]],
"Dragon Rage":[["Dra"],[[50],[100],[10]],[None]],
"Dragon Tail":[["Dra"],[[60],[90],[10]],[None]],
"Dragon Claw":[["Dra"],[[80],[100],[15]],[None]],
"Outrage":[["Dra"],[[120],[100],[10]],[None]],
"Dragon Breath":[["Dra"],[[60],[100],[20]],["Para",3/10]],
"Dragon Pulse":[["Dra"],[[85],[100],[10]],[None]],
"Dragon Rush ":[["Dra"],[[100],[75],[10]],[None]],
"Twister":[["Dra"],[[50],[90],[20]],[None]],
"Discharge":[["Ele"],[[80],[100],[15]],["Para",3/10]],
"Electro Ball":[["Ele"],[[60],[100],[20]],[None]],
"Zap Cannon":[["Ele"],[[120],[50],[5]],["Para",1]],
"Thunder":[["Ele"],[[110],[70],[10]],["Para",3/10]],
"Electro Web":[["Ele"],[[55],[95],[15]],[None]],
"Thunder Wave":[["Ele"],[[0],[100],[20]],["Para",1]],
"Bolt Strike":[["Ele"],[[130],[85],[5]],["Para",2/5]],
"Thunder Shock":[["Ele"],[[40],[100],[30]],["Para",1/10]],
"Shock Wave":[["Ele"],[[65],[95],[15]],[["Para",1/10]]],
"Thunder Fang":[["Ele"],[[90],[100],[15]],[None]],
"Spark":[["Ele"],[[65],[100],[20]],["Para",3/10]],
"Charge Beam":[["Ele"],[[50],[90],[10]],[None]],
"Volt Tackle":[["Ele"],[[120],[100],[15]],["Para",1/10]],
"Thunder Punch":[["Ele"],[[75],[100],[15]],["Para",1/10]],
"Thunderbolt":[["Ele"],[[90],[100],[15]],["Para",1/10]],
"Triple Kick":[["Fig"],[[40],[90],[10]],[None]],
"Seismic Toss":[["Fig"],[[0],[100],[20]],[None]],
"Cross Chop":[["Fig"],[[100],[80],[5]],["HighCrit",1]],
"Rolling Kick":[["Fig"],[[60],[85],[15]],["Para",3/10]],
"Jump Kick":[["Fig"],[[40],[90],[10]],[None]],
"Aura Sphere":[["Fig"],[[80],[100],[20]],[None]],
"Mach Punch":[["Fig"],[[40],[100],[30]],[None]],
"Dynamic Punch":[["Fig"],[[100],[50],[5]],["Confuse",1]],
"Hammer Arm":[["Fig"],[[100],[90],[10]],[None]],
"Focus Punch":[["Fig"],[[60],[100],[10]],[None]],
"Revenge":[["Fig"],[[40],[90],[10]],[None]],
"Drain Punch":[["Fig"],[[75],[100],[10]],[None]],
"Sky Uppercut":[["Fig"],[[85],[90],[15]],[None]],
"Superpower":[["Fig"],[[120],[100],[5]],[None]],
"Force Palm":[["Fig"],[[60],[100],[10]],["Para",3/10]],
"Brick Break":[["Fig"],[[75],[100],[15]],[None]],
"Sacred Sword":[["Fig"],[[90],[100],[20]],[None]],
"Karate Chop":[["Fig"],[[50],[100],[25]],["HighCrit",1]],
"Double Kick":[["Fig"],[[30],[100],[30]],[None]],
"Fire Fang":[["Fir"],[[65],[95],[15]],["Burn",1/10]],
"Flare Blitz":[["Fir"],[[120],[100],[15]],["Burn",1/10]],
"Incinerate":[["Fir"],[[60],[100],[15]],[None]],
"Flame Wheel":[["Fir"],[[60],[100],[25]],["Burn",1/10]],
"Flamethrower":[["Fir"],[[90],[100],[15]],["Burn",1/10]],
"Flame Burst":[["Fir"],[[70],[100],[15]],[None]],
"Fire Blast":[["Fir"],[[110],[85],[5]],["Burn",1/10]],
"Eruption":[["Fir"],[[150],[100],[5]],[None]],
"Lava Plume":[["Fir"],[[80],[100],[15]],["Burn",3/10]],
"Fire Punch":[["Fir"],[[75],[100],[15]],["Burn",1/10]],
"Sacred Fire":[["Fir"],[[100],[90],[10]],[None]],
"Overheat":[["Fir"],[[40],[95],[5]],["Burn",1/2]],
"Blue Flare":[["Fir"],[[130],[85],[5]],["Burn",1/5]],
"Heat Wave":[["Fir"],[[95],[90],[10]],["Burn",1/10]],
"Fire Spin":[["Fir"],[[35],[85],[15]],[None]],
"Ember":[["Fir"],[[40],[100],[25]],["Burn",1/10]],
"Air Cutter":[["Fly"],[[60],[95],[25]],[None]],
"Acrobatics":[["Fly"],[[55],[100],[15]],[None]],
"Gust":[["Fly"],[[40],[100],[35]],[None]],
"Bounce":[["Fly"],[[85],[85],[5]],["Para",3/10],[None]],
"Pluck":[["Fly"],[[60],[100],[20]],[None]],
"Brave Bird":[["Fly"],[[120],[100],[15]],[None]],
"Peck":[["Fly"],[[35],[100],[35]],[None]],
"Wing Attack":[["Fly"],[[60],[100],[35]],[None]],
"Sky Attack":[["Fly"],[[140],[90],[5]],[None]],
"Drill Peck":[["Fly"],[[80],[100],[20]],[None]],
"Fly":[["Fly"],[[90],[95],[15]],[None]],
"Air Slash":[["Fly"],[[75],[95],[20]],[None]],
"Aeroblast":[["Fly"],[[100],[95],[5]],[None]],
"Aerial Ace":[["Fly"],[[60],[100],[20]],["HighCrit",1]],
"Hypnosis":[["Gho"],[[0],[60],[20]],["Sleep",1]],
"Confuse Ray":[["Gho"],[[0],[100],[10]],["Confuse",1]],
"Shadow Punch":[["Gho"],[[60],[100],[20]],[None]],
"Shadow Ball":[["Gho"],[[80],[100],[15]],[None]],
"Night Shade":[["Gho"],[[0],[100],[15]],[None]],
"Ominous Wind":[["Gho"],[[60],[100],[5]],["AllRaise",1/10]],
"Lick":[["Gho"],[[30],[100],[30]],["Para",3/10]],
"Astonish":[["Gho"],[[30],[100],[15]],[None]],
"Shadow Sneak":[["Gho"],[[40],[100],[30]],[None]],
"Evil Eye":[["Gho"],[[80],[100],[20]],[None]],
"Bullet Seed":[["Gra"],[[20],[100],[30]],[None]],
"Solar Beam":[["Gra"],[[120],[100],[10]],[None]],
"Petal Dance":[["Gra"],[[120],[100],[10]],[None]],
"Stun Spore":[["Gra"],[[0],[75],[30]],["Para",1]],
"Poisonpowder":[["Gra"],[[0],[75],[35]],["Poison",1]],
"Mega Drain":[["Gra"],[[40],[100],[15]],[None]],
"Grass Knot":[["Gra"],[[60],[100],[20]],[None]],
"Leech Seed":[["Gra"],[[0],[90],[10]],[None]],
"Frenzy Plant":[["Gra"],[[150],[90],[6]],[None]],
"Absorb":[["Gra"],[[20],[100],[25]],[None]],
"Leaf Storm":[["Gra"],[[130],[90],[5]],[None]],
"Power Whip":[["Gra"],[[120],[85],[5]],[None]],
"Giga Drain":[["Gra"],[[75],[100],[10]],[None]],
"Needle Arm":[["Gra"],[[60],[100],[15]],["Flinch",3/10]],
"Seed Bomb":[["Gra"],[[80],[100],[15]],[None]],
"Leaf Blade":[["Gra"],[[90],[100],[15]],["HighCrit",1]],
"Vine Whip":[["Gra"],[[45],[100],[25]],[None]],
"Razor Leaf":[["Gra"],[[55],[95],[25]],["HighCrit",1]],
"Magical Leaf":[["Gra"],[[60],[100],[20]],[None]],
"Sleep Powder":[["Gra"],[[0],[75],[15]],["Sleep",1]],
"Leech Life":[["Gra"],[[20],[100],[15]],[None]],
"Magnitude":[["Gro"],[[60],[100],[30]],[None]],
"Mud Shot":[["Gro"],[[55],[95],[15]],[None]],
"Sand Attack":[["Gro"],[[0],[100],[15]],[None]],
"Sand Tomb":[["Gro"],[[60],[100],[20]],[None]],
"Bone Rush":[["Gro"],[[25],[90],[10]],[None]],
"Bone Club":[["Gro"],[[65],[85],[20]],["Flinch",1/10]],
"Bonemerang":[["Gro"],[[50],[90],[10]],[None]],
"Earthquake":[["Gro"],[[100],[100],[10]],[None]],
"Earth Power":[["Gro"],[[90],[100],[10]],[None]],
"Dig":[["Gro"],[[80],[100],[10]],[None]],
"Mud Bomb":[["Gro"],[[65],[85],[10]],[None]],
"Mud Slap":[["Gro"],[[20],[100],[10]],[None]],
"Ice Punch":[["Ice"],[[75],[100],[15]],["Freeze",1/10]],
"Avalanche":[["Ice"],[[60],[100],[10]],[None]],
"Ice Shard":[["Ice"],[[40],[100],[30]],[None]],
"Icicle Spear":[["Ice"],[[25],[100],[30]],[None]],
"Ice Fang":[["Ice"],[[65],[95],[15]],["Freeze",1/10]],
"Icy Wind":[["Ice"],[[55],[95],[15]],[None]],
"Powder Snow":[["Ice"],[[40],[100],[25]],["Freeze",1/10]],
"Blizzard":[["Ice"],[[110],[70],[5]],["Freeze",1/10]],
"Aurora Beam":[["Ice"],[[65],[100],[20]],[None]],
"Ice Beam":[["Ice"],[[90],[100],[10]],["Freeze",1/10]],
"Constrict":[["Nor"],[[10],[100],[35]],[None]],
"Slam":[["Nor"],[[80],[75],[20]],[None]],
"Explosion":[["Nor"],[[250],[100],[1]],[None]],
"Wrap":[["Nor"],[[15],[90],[20]],[None]],
"Razor Wind":[["Nor"],[[80],[100],[10]],["HighCrit",1]],
"Headbutt":[["Nor"],[[70],[100],[15]],[None]],
"Horn Drill":[["Nor"],[[0],[0],[5]],[None]],
"Bind":[["Nor"],[[15],[85],[20]],[None]],
"Growl":[["Nor"],[[0],[100],[40]],[None]],
"Facade":[["Nor"],[[70],[100],[20]],[None]],
"Sing":[["Nor"],[[0],[55],[15]],["Sleep",1]],
"Softboiled":[["Nor"],[[0],[0],[10]],[None]],
"Rage":[["Nor"],[[20],[100],[20]],[None]],
"Retaliation":[["Nor"],[[70],[100],[5]],[None]],
"Skull Bash":[["Nor"],[[130],[100],[10]],[None]],
"Last Resort":[["Nor"],[[140],[100],[5]],[None]],
"Egg Bomb":[["Nor"],[[100],[75],[10]],[None]],
"Leer":[["Nor"],[[0],[100],[30]],[None]],
"Fury Attack":[["Nor"],[[15],[85],[20]],[None]],
"Body Slam":[["Nor"],[[85],[100],[15]],["Para",3/10]],
"Crush Claw":[["Nor"],[[75],[95],[10]],[None]],
"Take Down":[["Nor"],[[90],[85],[20]],[None]],
"Double Slap":[["Nor"],[[15],[85],[10]],["Multi",1]],
"Double Hit":[["Nor"],[[35],[90],[10]],[None]],
"Secret Power":[["Nor"],[[70],[100],[20]],[None]],
"Fury Swipes":[["Nor"],[[18],[80],[15]],[None]],
"Slash":[["Nor"],[[70],[100],[20]],["HighCrit",1]],
"Swords Dance":[["Nor"],[[40],[10],[30]],[None]],
"Tackle":[["Nor"],[[50],[100],[35]],[None]],
"Extremespeed":[["Nor"],[[80],[100],[5]],[None]],
"Tail Whip":[["Nor"],[[0],[100],[30]],[None]],
"Pound":[["Nor"],[[120],[100],[15]],[None]],
"Double Edge":[["Nor"],[[40],[100],[30]],[None]],
"Rock Climb":[["Nor"],[[90],[85],[20]],["Confuse",1/5]],
"Dizzy Punch":[["Nor"],[[70],[100],[10]],["Confuse",1/5]],
"Hyper Beam":[["Nor"],[[150],[90],[5]],[None]],
"Screech":[["Nor"],[[0],[85],[40]],[None]],
"Scratch":[["Nor"],[[40],[100],[35]],[None]],
"Smokescreen":[["Nor"],[[0],[100],[20]],[None]],
"Giga Impact":[["Nor"],[[150],[90],[5]],[None]],
"False Swipe":[["Nor"],[[40],[100],[40]],[None]],
"Pay Day":[["Nor"],[[40],[100],[20]],[None]],
"Hyper Voice":[["Nor"],[[90],[100],[10]],[None]],
"Supersonic":[["Nor"],[[0],[55],[20]],["Confuse",1]],
"Guillotine":[["Nor"],[[0],[0],[5]],[None]],
"Mega Kick":[["Nor"],[[120],[75],[5]],[None]],
"Comet Punch":[["Nor"],[[90],[100],[10]],[None]],
"Swift":[["Nor"],[[60],[100],[20]],[None]],
"Lovely Kiss":[["Nor"],[[0],[75],[10]],["Sleep",1]],
"Mega Punch":[["Nor"],[[18],[85],[15]],[None]],
"Sonicboom":[["Nor"],[[0],[90],[20]],[None]],
"Rapid Spin":[["Nor"],[[20],[100],[40]],[None]],
"Rest":[["Nor"],[[0],[0],[10]],[None]],
"Yawn":[["Nor"],[[0],[0],[10]],[None]],
"Hyper Fang":[["Nor"],[[80],[90],[15]],[None]],
"Vicegrip":[["Nor"],[[55],[100],[30]],[None]],
"Sweet Kiss":[["Nor"],[[0],[75],[10]],["Confuse",1]],
"Spike Cannon":[["Nor"],[[20],[100],[15]],[None]],
"Horn Attack":[["Nor"],[[65],[100],[25]],[None]],
"Thrash":[["Nor"],[[120],[100],[10]],[None]],
"Cut":[["Nor"],[[50],[95],[30]],[None]],
"Harden":[["Nor"],[[0],[0],[30]],[None]],
"Super Fang":[["Nor"],[[0],[90],[10]],[None]],
"Quick Attack":[["Nor"],[[40],[100],[30]],[None]],
"Clear Smog":[["Poi"],[[50],[100],[15]],[None]],
"Poison Jab":[["Poi"],[[80],[100],[20]],["Poison",3/10]],
"Poison Tail":[["Poi"],[[50],[100],[25]],["Poison",1/10]],
"Sludge Bomb":[["Poi"],[[90],[100],[10]],["Poison",3/10]],
"Toxic":[["Poi"],[[0],[90],[10]],["Poison",1]],
"Acid":[["Poi"],[[40],[100],[30]],[None]],
"Poison Gas":[["Poi"],[[0],[90],[40]],["Poison",1]],
"Poison Sting":[["Poi"],[[15],[100],[35]],["Poison",3/10]],
"Smog":[["Poi"],[[30],[70],[20]],["Poison",2/5]],
"Sludge":[["Poi"],[[65],[100],[20]],["Poison",3/10]],
"Poison Fang":[["Poi"],[[50],[100],[15]],["Poison",3/10]],
"Gunk Shot":[["Poi"],[[120],[80],[5]],["Poison",3/10]],
"Calm Mind":[["Psy"],[[0],[0],[20]],[None]],
"Luster Purge":[["Psy"],[[70],[100],[5]],[None]],
"Teleport":[["Psy"],[[0],[0],[20]],[None]],
"Psywave":[["Psy"],[[0],[80],[15]],[None]],
"Psybeam":[["Psy"],[[65],[100],[20]],["Confuse",1/10]],
"Psychic":[["Psy"],[[90],[100],[10]],[None]],
"Zen Headbutt":[["Psy"],[[80],[90],[15]],[None]],
"Psycho Cut":[["Psy"],[[70],[100],[20]],["HighCrit",1]],
"Extrasensory":[["Psy"],[[80],[100],[20]],[None]],
"Confusion":[["Psy"],[[50],[100],[25]],["Confuse",1/10]],
"Hidden Power":[["Psy"],[[60],[100],[15]],[None]],
"Synchro Noise":[["Psy"],[[50],[100],[15]],[None]],
"Stomp":[["Psy"], [[65], [100], [20]], [None]],
"Low Kick":[["Psy"], [[60], [100], [20]], [None]],
"Rock Slide":[["Roc"],[[75],[90],[10]],[None]],
"Stone Edge":[["Roc"],[[100],[80],[5]],["HighCrit",1]],
"Rollout":[["Roc"],[[30],[90],[20]],[None]],
"Rock Blast":[["Roc"],[[25],[90],[10]],[None]],
"Ancient Power":[["Roc"],[[60],[100],[5]],[None]],
"Rock Wrecker":[["Roc"],[[150],[90],[5]],[None]],
"Head Smash":[["Roc"],[[150],[80],[5]],[None]],
"Rock Throw":[["Roc"],[[50],[90],[15]],[None]],
"Rock Tomb":[["Roc"],[[60],[95],[15]],[None]],
"Steel Wing":[["Roc"],[[70],[90],[25]],[None]],
"Meteor Mash":[["Roc"],[[90],[90],[10]],[None]],
"Metal Burst":[["Roc"],[[0],[100],[10]],[None]],
"Magnet Bomb":[["Roc"],[[60],[100],[20]],[None]],
"Iron Tail":[["Roc"],[[100],[75],[15]],[None]],
"Metal Claw":[["Roc"],[[50],[95],[35]],[None]],
"Mirror Shot":[["Roc"],[[65],[85],[10]],[None]],
"Iron Head":[["Roc"],[[80],[100],[15]],[None]],
"Flash Cannon":[["Roc"],[[50],[100],[10]],[None]],
"Surf":[["Wat"],[[90],[100],[15]],[None]],
"Splash":[["Wat"],[[0],[0],[40]],[None]],
"Aqua Jet":[["Wat"],[[40],[100],[20]],[None]],
"Crabhammer":[["Wat"],[[100],[90],[10]],["HighCrit",1]],
"Aqua Tail":[["Wat"],[[90],[90],[10]],[None]],
"Bubblebeam":[["Wat"],[[65],[100],[20]],[None]],
"Muddy Water":[["Wat"],[[90],[85],[10]],[None]],
"Brine":[["Wat"],[[65],[100],[10]],[None]],
"Water Pulse":[["Wat"],[[60],[100],[20]],["Confuse",1/5]],
"Waterfall":[["Wat"],[[80],[100],[15]],[None]],
"Water Gun":[["Wat"],[[40],[100],[25]],[None]],
"Bubble":[["Wat"],[[40],[100],[30]],[None]],
"Hydro Pump":[["Wat"],[[110],[80],[10]],[None]],
"Uproar":[["Nor"],[[90],[100],[10]],[None]],
}
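#Sketch of how an attack entry is read (hypothetical helper mirroring the
#Pokemon.getSkill* methods further down): each value is laid out as
#[[type], [[power], [accuracy], [pp]], [bonus effect or [None]]].
def describe_attack(name):
    a_type, stats, bonus = attack[name]
    return {"type": a_type[0], "power": stats[0][0],
            "accuracy": stats[1][0], "pp": stats[2][0], "bonus": bonus}
#e.g. describe_attack("Thunderbolt")
#-> {'type': 'Ele', 'power': 90, 'accuracy': 100, 'pp': 15, 'bonus': ['Para', 0.1]}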
def Burn(self, if_run, mod):
if if_run:
self.hp-=self.maxhp//16
self.condition[inspect.stack()[0][3]][2]=1/2
def Freeze(self, if_run, mod):
    #Freeze: the frozen Pokemon's damage mod is set to 0 until it thaws.
    if if_run:
        self.condition[inspect.stack()[0][3]][2]=0
        #roughly a 1-in-5 chance to thaw each turn
        if randint(1,5)==1:
            self.condition[inspect.stack()[0][3]][1]=False
def Para(self, if_run, mod):
    #Paralysis: a 1-in-4 chance each turn that the Pokemon cannot act (mod 0).
    if if_run:
        parac=randint(1,4)
        if parac==1:
            self.condition[inspect.stack()[0][3]][2]=0
        else:
            self.condition[inspect.stack()[0][3]][2]=1
        #attack() checks this return value to decide whether the turn is lost
        return self.condition[inspect.stack()[0][3]][2]
def Sleep(self,if_run,mod):
    #Sleep: the sleeping Pokemon's damage mod is 0; about a 1-in-3 chance to wake each turn.
    if if_run:
        self.condition[inspect.stack()[0][3]][2]=0
        if randint(1,3)==1:
            self.condition[inspect.stack()[0][3]][1]=False
def Poison(self,if_run,mod):
if if_run:
self.hp-=self.maxhp//8
def BadPoison(self,if_run,mod):
    #Toxic poison: damage starts at 1/16 of max HP and grows by 1/16 every turn.
    if if_run:
        self.hp-=int(self.maxhp*self.badpoisonc)
        self.badpoisonc+=1/16
def Flinch(self,if_run,mod):
    #Flinch: the affected Pokemon loses only its next action.
    if if_run:
        self.condition[inspect.stack()[0][3]][2]=0
        self.condition[inspect.stack()[0][3]][1]=False
        return 0
def Confuse(self,if_run,mod):
    #Confusion: cFlag makes attack() sometimes redirect the damage to the attacker;
    #roughly a 1-in-4 chance to snap out of it each turn.
    if if_run:
        self.cFlag=True
        if randint(1,4)==1:
            self.cFlag=False
def HighCrit(self,if_run,mod):
if if_run:
self.CHlist=[1,1,1,2,2]
if not if_run:
self.CHlist=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2]
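#How the status handlers above are wired in (explanatory note): every Pokemon
#keeps self.condition = {"Burn": [pokemon, active_flag, damage_mod], ...} and
#Pokemon.attack() calls each handler as handler(*condition_entry) every turn,
#so a handler only acts once a move's bonus effect has switched its flag on.
#A hypothetical stand-alone check would look like:
#    if target.condition["Burn"][1]:
#        Burn(*target.condition["Burn"])   #chip damage and halved attack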
def Damage(Level,BaseP,SpAtk,SpDef,r,STAB,Weakness,CH,mod1):
DamageDealt=trunc(trunc(trunc(trunc(trunc(trunc((trunc(trunc(trunc(trunc((trunc(Level*2/5)+2)*BaseP)*SpAtk)/50)/SpDef)+2)*CH)*r)/100)*STAB)*Weakness)*mod1)
return DamageDealt
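#Worked example of the damage formula above (hypothetical numbers): a level 50
#attacker using a 90-power move with 100 SpAtk into 80 SpDef, a full damage
#roll (r=100), STAB 1.5, a 2x type weakness, no crit (CH=1) and mod1=1.
_example_damage = Damage(50, 90, 100, 80, 100, 1.5, 2, 1, 1)
#print(_example_damage)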
def Catchchance(hpMax,hpCurrent,rate,ball,status):
a =round(((3*hpMax-2*hpCurrent)*(rate*ball))/((3*hpMax)*status))
return a
def ShakeComparison(CatchChance,chart):
    #Chart is the pickled 2D list of Pokemon catch-rate comparison values
    comparison=0
    with open(chart, "rb") as BComp:
        file=pickle.loads(BComp.read())
    for i in range(len(file)):
        if file[i][0]==CatchChance:
            comparison=file[i][1]
    instant=randint(0,60)
    counter=0
    shakeList=[]
    if comparison<=instant: #Instantly catch the Pokemon if the chances are right.
        return ["Caught",3]
    else:
        for i in range(3): #Performs 3 chances for the Pokemon to escape.
            shakeList.append(randint(0,255))
        for i in range(len(shakeList)):
            if shakeList[i]<comparison:
                counter+=1
        if counter==3:
            return ["Caught",3]
        else:
            return ["Escaped",counter]
def CatchFormula(hpMax,hpCurrent,rate,ball,status,chart):
a=Catchchance(hpMax,hpCurrent,rate,ball,status)
return ShakeComparison(a,chart)
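#Hedged usage sketch for the catch helpers: Catchchance needs no data file, so
#it can be sanity-checked directly; CatchFormula also needs the pickled
#comparison chart (e.g. "BComparison2.txt", as used by the ball items below),
#so it is only shown here as a comment.
_example_catch = Catchchance(hpMax=100, hpCurrent=10, rate=190, ball=1.5, status=1)
#CatchFormula(100, 10, 190, 1.5, 1, "BComparison2.txt") #-> ["Caught", 3] or ["Escaped", n]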
##StartUserPokeHP=UserPoke[0][-1][0]
##CurrUserPokeHP=StartUserPokeHP
##
##StartOppPokeHP=opp[0][-1][0]
##CurrOppPokeHP=StartOppPokeHP
##
##for i in range(0,len(UserPoke)):
## print("Your Pokemon: \n",UserPoke[i][0][0]+",",CurrUserPokeHP,"/",StartUserPokeHP)
##
##for i in range(0,len(opp)):
## print(opp[0][0][0],":",CurrOppPokeHP,"/",StartOppPokeHP)
##
##print("Your Attacks:\n")
##for i in range(4):
## print(i+1,UserPoke[0][4][i])
##
##userattackchoice=input("Enter the attack# you want")
##userattack=(UserPoke[0][4][int(userattackchoice)-1])
##
_POKEMON_ID = 0
class Pokemon(object):
def __init__(self, *args):
self.exp = -1
self.level = -1
self.tID = -1
self.SpAtk = -1
self.SpDef = -1
self.type1 = -1
self.type2 = -1
self.hp = -1
self.maxhp = -1
self.skill = []
self.image_front = None
self.image_back = None
self.name = ""
self.id = 0
self.badpoisonc=1/16
self.skillpp = [0, 0, 0, 0]
self._last_effect = "Missed"
if args: self.load(*args)
def load(self, identifer, xp, name=None, changeid=None):
global _POKEMON_ID
if changeid is None:
_POKEMON_ID += 1
self.id = _POKEMON_ID
else:
self.id = changeid
if xp//500 <= 0: xp = 500
self.exp = xp
self.level = self.exp//500
if self.level > 100: self.level = 100
self.tID = identifer
self.SpAtk = PokeStat[identifer][3][0]+(self.level/2)
self.SpDef = PokeStat[identifer][3][1]+(self.level/2)
self.type1 = PokeStat[identifer][1][0]
self.type2 = PokeStat[identifer][1][-1]
self.hp = self.level*4
self.maxhp = self.level*4
self.skill = PokeStat[identifer][4][:]
self.image_front = None
self.image_back = None
        if name is None: self.name = self.getName()
        else: self.name = name
self.condition={"Burn":[self, False, 1], "Freeze":[self,False,1], "Para":[self,False,1],
"Sleep":[self,False,1], "Poison":[self,False,1], "BadPoison":[self,False,1],
"Flinch":[self,False,1], "HighCrit":[self,False,1]}
self.mod1=1
self.badpoisonc=1/16
self.cFlag=False
self.CHlist=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2]
self.CH=choice(self.CHlist)
self.cChance=PokeStat[identifer][5][0]
self.skillpp = []
for i in range(4): self.skillpp.append(self.getSkillPP_ori(i))
if isinstance(self.getEvolve()[0], int) and self.getEvolve()[0] <= self.level:
self.evolve()
def getID(self):
return self.tID
def getName(self):
return PokeStat[self.tID][0][0]
def getHP(self):
return self.hp
def getSkill(self, skillno):
return self.skill[skillno]
def getSkills(self):
return self.skill
def getSkillP(self,skillno):
return attack[self.getSkill(skillno)][1][0][0]
def getSkillT(self,skillno):
return attack[self.getSkill(skillno)][0][0]
def getSkillA(self,skillno):
return attack[self.getSkill(skillno)][1][1][0]
def getSkillBonus(self,skillno):
if attack[self.getSkill(skillno)][-1][0] is None:
return None
else:
return attack[self.getSkill(skillno)][-1][0]
def getSkillBonusChance(self, skillno):
try: return attack[self.getSkill(skillno)][-1][1]
except: return 0
def getSkillPP_ori(self, skillno):
return attack[self.getSkill(skillno)][1][2][0]
def getSkillPP(self, skillno):
return self.skillpp[skillno]
def getEvolve(self):
        if self.getID()!=133:
if PokeStat[self.getID()][-1][0]!="Final Stage of Evolution":
#Checks if Pokemon can Evolve
if PokeStat[self.getID()][-1][1]=="Water Stone":
return ["Water Stone","Stone"]
if PokeStat[self.getID()][-1][1]=="Leaf Stone":
return ["Leaf Stone","Stone"]
if PokeStat[self.getID()][-1][1]=="Fire Stone":
return ["Fire Stone","Stone"]
if PokeStat[self.getID()][-1][1]=="Moon Stone":
return ["Moon Stone","Stone"]
if PokeStat[self.getID()][-1][1]=="Thunder Stone":
return ["Thunder Stone","Stone"]
#If pokemon needs stones to evolve, use as stone
if isinstance(PokeStat[self.getID()][-1][1], int):
return [PokeStat[self.getID()][-1][1]]
#If pokemon only needs to be a level, then it can evolve right away
return ["Final Stage of Evolution"]
        else:
            #Eevee (id 133) has branching evolutions; those are handled in evolve()
            return ["Final Stage of Evolution"]
def evolve(self, using=None):
# Special Case Eevee with multi evolutions
if self.getName()=="Eevee":
if using=="Water Stone":
self.tID=134
elif using=="Thunder Stone":
self.tID=135
elif using=="Fire Stone":
self.tID=136
else:
return self.tID
self.load(self.tID, self.exp, changeid=self.id)
return self.tID
if self.getEvolve()[-1]=="Final Stage of Evolution":
return self.getEvolve()
if self.getEvolve()[-1]=="Stone":
#Normal Pokemon
if using == self.getEvolve()[0]:
self.tID+=1
self.load(self.tID, self.exp, changeid=self.id)
if isinstance(self.getEvolve()[-1],int):
if self.level>=self.getEvolve()[-1]:
self.tID+=1
self.load(self.tID, self.exp, changeid=self.id)
return self.tID
# if isinstance(self.getEvolve()[-1],int):
# if self.getEvolve()[-1]<self.level:
# self.tID+=1
# return self.tID
def get_evolve_item(self):
        return PokeStat[self.getID()][-1][1]
def attack(self, other, skillno):
if not self.getSkillPP(skillno):
self._last_effect="No PP Left"
return 0
else: self.skillpp[skillno] -= 1
self.condition["Flinch"][0] = other
if self.getSkillBonus(skillno) is not None:
conditionList=[Burn,Freeze,Para,Sleep,Poison,BadPoison,Flinch,Confuse]
condition=[]
for i in range (len(conditionList)):
## print(self.getSkillBonus(skillno))
                if conditionList[i].__name__==self.getSkillBonus(skillno):
                    condition.append(conditionList[i].__name__)
chance=self.getSkillBonusChance(skillno)
chancecheck=randint(0,100)
if 100*chance>chancecheck and condition:
if condition[0] == "Confuse":
other.cFlag = True
else:
other.condition[condition[0]][1] = True
_no_action = False
for i in self.condition:
val = eval(i)(*self.condition[i])
if i == "Para" and val == 0:
_no_action = True
elif i == "Flinch" and val == 0:
_no_action = True
if _no_action:
self._last_effect="No Action"
return 0
self._last_effect="Missed"
accCheckv=self.getSkillA(skillno)
accCheck=randint(0,100)
if accCheck<=accCheckv:
if self.cFlag and randint(0, 1):
damage = Damage(self.level,
self.getSkillP(skillno),
self.SpAtk,
self.SpDef,
randint(85,100),
STAB(self.type1,self.type2,self.getSkillT(skillno)),
BonusCalc(self.getSkillT(skillno),self.type1,self.type2)[0],
self.CH,self.mod1)
if not self.getSkillP(skillno): damage = 0
self._last_effect = BonusCalc(self.getSkillT(skillno),self.type1,self.type2)[1]
self._last_effect+="\nAttacked your pokemon itself. Damage: %d"%damage
self.hp-=damage
damage = 0
else:
damage = Damage(self.level,
self.getSkillP(skillno),
self.SpAtk,
other.SpDef,
randint(85,100),
STAB(self.type1,self.type2,self.getSkillT(skillno)),
BonusCalc(self.getSkillT(skillno),other.type1,other.type2)[0],
self.CH,self.mod1)
if not self.getSkillP(skillno): damage = 0
self._last_effect = BonusCalc(self.getSkillT(skillno),other.type1,other.type2)[1]
self._last_effect+="\nDamage: %d"%damage
other.hp -= damage
if other.hp < 0: other.hp = 0
return damage
elif accCheck>=accCheckv:
return 0
def get_str(self):
return self._last_effect
def getProperties(self):
return {} # all properties
def render(self, front=True): # basic image -> no anim.
if self.image_front is None:
self.image_front = pygame.image.load(path+"/pokedex-media/pokemon/"
"main-sprites/platinum/%d.png"%self.tID)\
.convert_alpha()
self.image_front = pygame.transform.scale(self.image_front, (200, 200))
if self.image_back is None:
self.image_back = pygame.image.load(path+"/pokedex-media/pokemon/"
"main-sprites/platinum/back/%d.png"%
self.tID)\
.convert_alpha()
self.image_back = pygame.transform.scale(self.image_back, (200, 200))
if front: return self.image_front
else: return self.image_back
def get_size(self, front=True):
if front: return self.image_front.get_size()
else: return self.image_back.get_size()
def get_hp_percentage(self):
return (self.hp/self.maxhp)*100
def get_exp_percentage(self):
if self.exp >= 50000: return (self.exp-50000)/5
else: return (self.exp%500)/5
def get_level(self):
return self.level
def add_exp(self, exp):
self.exp += exp
level = self.exp//500
if level > 100: return
if level > self.level: self.load(self.tID, self.exp, self.name, changeid=self.id)
def new(self):
p = self.__class__()
p.load(self.tID, self.exp)
return p
##print(userattack)
##print((Damage((trunc(UserPoke[0][2][0]/500)),\
## (attack[userattack][1][0][0]),\
## (trunc(PokeStat[1][3][0]+(UserPoke[0][2][0]/500)*1/2)),\
## (trunc(PokeStat[1][3][1]+(opp[0][3][0]/500)*1/2)),\
## 100,\
## (STAB(PokeStat[1][3][0],PokeStat[1][3][1],attack[UserPoke[0][4][0]][0])),\
## BonusCalc((attack[userattack][0][0]),opp[0][1][0],opp[0][1][-1]),\
## 1)))
# a = Pokemon()
# a.load(10, 2500)
# b = Pokemon()
# b.load(15, 4000)
# print(a.hp, b.hp)
# print(a.attack(b, 2))
# print(a.hp, b.hp)
# print(b.attack(a, 0))
# print(a.hp, b.hp)
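# A further usage sketch, kept commented like the examples above: Eevee (id 133)
# is the multi-evolution special case handled inside Pokemon.evolve().
# eevee = Pokemon(133, 2500) # level 5 Eevee
# eevee.evolve("Water Stone") # returns 134 and reloads the stats as Vaporeon
# print(eevee.getName()) # -> "Vaporeon"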
class Character(g_object):
def __init__(self,name,typ,level):
self._level=level
super(Character, self).__init__(typ, name)
class Player(Character):
def __init__(self, pokemons, items, name, level, money, typ=TYPE_PLAYER):
# def __init__(self, pokemons, name, level, money):
for i in items:
if not issubclass(i.__class__, item): raise ValueError(type(items))
# global _CURRENT_ID
super(Player, self).__init__(name, typ, level)
# self.id = _CURRENT_ID
# _CURRENT_ID += 1
self.pokemon = []
self.backpack = {}
self.money = money
self.pokemon_save = [] # computer npc
# if not pokemons: raise ValueError(len(pokemons)) # at least one
for i in pokemons:
self.pokemon.append(i)
for i in items:
i.setOwner(self)
self.backpack[i.id] = i
self.tasks = {}
self.info = {"c_tme": int(time.time()),
"catch_record": {}, # tID: count, (last)time.time())
"accomplishment": [], # (name, time.time())
}
self.g_info = [-1, -1]
def savePokemon(self, p_uniq_id):
if len(self.pokemon) == 1: return False
for i in self.pokemon:
if i.id == p_uniq_id:
self.pokemon_save.append(i)
self.pokemon.remove(i)
break
return True
def loadPokemon(self, p_uniq_id):
if len(self.pokemon)+1 > 6: return False
for i in self.pokemon_save:
if i.id == p_uniq_id:
self.pokemon.append(i)
self.pokemon_save.remove(i)
break
return True
def getCurrentPokemon(self):
return self.pokemon[0]
def addPokemon(self, Pokemon_inst):
self.pokemon.append(Pokemon_inst)
def delPokemon(self, p_uniq_id):
if self.getCurrentPokemon().id == p_uniq_id: raise ValueError("COULD NOT DEL CURRENT POKEMON")
for i in self.pokemon:
if i.id == p_uniq_id:
self.pokemon.remove(i)
def getNextAlivePokemon(self):
for i in self.pokemon:
if i.getHP() > 0:
return i
return None
def setCurrentPokemon(self, p_uniq_id):
if self.getCurrentPokemon().id != p_uniq_id:
for i in range(len(self.pokemon)):
if self.pokemon[i].id == p_uniq_id:
self.pokemon.insert(0, self.pokemon.pop(i))
def evolve(self):
ids=[]
for i in self.backpack:
if self.backpack[i].name==self.getCurrentPokemon().getEvolve()[0]:
ids.append(i)
for i in ids:
self.useItem(i)
self.getCurrentPokemon().evolve()
def addItem(self, item_inst):
if issubclass(item_inst.__class__, item):
item_inst.setOwner(self)
if item_inst.id in self.backpack:
self.backpack[item_inst.id].count += item_inst.count
return False, self.backpack[item_inst.id]
for i in self.backpack:
if self.backpack[i].name == item_inst.name:
self.backpack[i].count += item_inst.count
return False, self.backpack[i]
self.backpack[item_inst.id] = item_inst
return True, self.backpack[item_inst.id]
else: raise ValueError(type(item_inst))
def delItem(self, item_id):
if item_id in self.backpack:
del self.backpack[item_id]
def useItem(self, item_id, other=None):
if item_id in self.backpack:
r = self.backpack[item_id].use(other)
if self.backpack[item_id].count <= 0:
del self.backpack[item_id]
return r
raise ValueError(item_id)
def getItem(self, item_id):
if item_id in self.backpack:
return self.backpack[item_id]
return None
def getBackpackInfo(self):
li = list(map((lambda x: x.getInfo()), self.backpack.values()))
li.sort()
return li
def check_backpack(self):
lidel = []
for i in self.backpack:
if self.backpack[i].count <= 0:
lidel.append(i)
for i in lidel:
del self.backpack[i]
def open_task(self, identifier, *init_args):
if identifier in self.tasks:
return False, self.tasks[identifier]
else:
self.tasks[identifier] = Task(*init_args)
            return True, self.tasks[identifier]
def del_task(self, identifier):
if identifier in self.tasks: del self.tasks[identifier]
def check_prev_task(self, prev):
for i in prev:
if i not in self.tasks: return False
if not self.tasks[i].done: return False
return True
def save(self):
for i in self.pokemon:
i.image_front = None
i.image_back = None
self._res = None
self.g_info = [_POKEMON_ID, _ITEM_ID]
def load(self):
global _POKEMON_ID, _ITEM_ID
if self.g_info != [-1, -1]:
_POKEMON_ID = self.g_info[0]
_ITEM_ID = self.g_info[1]
import ChiangObjectives as co
self._res = co._res[self._name]
class Task(object):
def __init__(self, prev=[], name="Task"):
self.stage = 0
self.prev = prev
self.done = False
self.name = name
_ITEM_ID = 0
class item(object):
def __init__(self, name, count=1, buy_price=0, sell_price=0, owner=None):
global _ITEM_ID
_ITEM_ID += 1
self.id = _ITEM_ID
self.name = name
self.owner = owner
self.count = count
self.buy_price = buy_price
self.sell_price = sell_price
def setOwner(self, owner):
self.owner = owner
def getInfo(self):
return self.name, self.id
def getType(self):
return items[self.name][-1][0]
def use(self, other):
if self.count <= 0: raise ValueError(self.count)
result = self.apply(other)
if result is not None: self.count -= 1
return result
def apply(self, other):
pass
# self.owner apply item
# return if successfully applied
def describe(self):
return "description here"
def sell(self, count):
if count > self.count: raise ValueError(count)
self.count -= count
self.owner.money += count * self.sell_price
def buy(self, count):
if count < 0: raise ValueError(count)
if count*self.buy_price <= self.owner.money:
self.owner.money -= count*self.buy_price
self.count += count
else: raise ValueError(self.owner.money)
def getMaxBuy(self):
return self.owner.money//self.buy_price
def getMaxSell(self):
return self.count
items={"Pokeball":[[1],[200],["A device for catching wild Pokémon.It's thrown like a ball at a Pokémon, comfortably encapsulating its target."],["Pokeball"]],
"Great Ball":[[1.5],[600],["A good, high-performance Poké Ball that provides a higher Pokémon catch rate than a standard Poké Ball can."],["Pokeball"]],
"Ultra Ball":[[2],[1200],["An ultra-high performance Poké Ball that provides a higher success rate for catching Pokémon than a Great Ball."],["Pokeball"]],
"Master Ball":[[255],[20000],["The best Poké Ball with the ultimate level of performance. With it, you will catch any wild Pokémon without fail."],["Pokeball"]],
"Potion":[[20],[300],["A spray-type medicine for treating wounds. It can be used to restore 20 HP to an injured Pokémon."],["Potion"]],
"Super Potion":[[50],[700],["A spray-type medicine for treating wounds. It can be used to restore 50 HP to an injured Pokémon."],["Potion"]],
"Hyper Potion":[[200],[1200],["A spray-type medicine for treating wounds.It can be used to restore 200 HP to an injured Pokémon."],["Potion"]],
"Max Potion":[[9999],[2500],["A spray-type medicine for treating wounds. It will completely restore the max HP of a single Pokémon."],["Potion"]],
"Full Restore":[[9999],[2500],["A medicine that can be used to fully restore the HP of a single Pokémon and heal any status conditions it has."],["Potion"]],
"Antidote":[["Poison","BadPoison"],[100],["A spray-type medicine for poisoning. It can be used once to lift the effects of being poisoned from a Pokémon."],["Potion"]],
"Parlyz Heal":[["Para"],[200],["A spray-type medicine for paralysis. It can be used once to free a Pokémon that has been paralyzed."],["Potion"]],
"Awakening":[["Sleep"],[250],["A spray-type medicine used against sleep. It can be used once to rouse a Pokémon from the clutches of sleep."],["Potion"]],
"Burn Heal":[["Burn"],[250],["A spray-type medicine for treating burns. It can be used once to heal a Pokemon suffering from a burn."],["Potion"]],
"Ice Heal":[["Freeze"],[100],["A spray-type medicine for freezing. It can be used once to defrost a Pokémon that has been frozen solid."],["Potion"]],
"Fire Stone":[[1200],["A peculiar stone that can make certain species of Pokémon evolve. The stone has a fiery orange heart."], ["Stone"]],
"Thunder Stone":[[1200],["A peculiar stone that can make certain species of Pokémon evolve. It has a distinct thunderbolt pattern."],["Stone"]],
"Water Stone":[[1200],["A peculiar stone that can make certain species of Pokémon evolve. It is the blue of a pool of clear water."],["Stone"]],
"Leaf Stone":[[1200],["A peculiar stone that can make certain species of Pokémon evolve. It has an unmistakable leaf pattern."],["Stone"]],
"Moon Stone":[[1200],["A peculiar stone that can make certain species of Pokémon evolve. It is as black as the night sky."],["Stone"]],}
ITEM_NOTUSED = None
ITEM_USED = 0
ITEM_CAUGHT = 1
class Item(item):
def describe(self):
return items[self.name][-2][0]
def apply(self, other):
pkmon = self.owner.getCurrentPokemon()
if other is not None: otherpkmon=other.getCurrentPokemon()
else: otherpkmon = None
if self.getType()=="Stone":
if self.name==pkmon.getEvolve()[0]:
pkmon.evolve(self.name)
return ITEM_USED
elif self.getType()=="Potion":
if self.name=="Potion":
pkmon.hp+= item(items[self.name][0])
if pkmon.hp+item(items[self.name][0])>pkmon.maxhp:
pkmon.hp=pkmon.maxhp
elif self.name=="Super Potion":
pkmon.hp+= item(items[self.name][0])
if pkmon.hp+item(items[self.name][0])>pkmon.maxhp:
pkmon.hp=pkmon.maxhp
elif self.name=="Hyper Potion":
pkmon.hp+= item(items[self.name][0])
if pkmon.hp+item(items[self.name][0])>pkmon.maxhp:
pkmon.hp=pkmon.maxhp
elif self.name=="Max Potion":
pkmon.hp=pkmon.maxhp
elif self.name=="Antidote":
if pkmon.condition["Poison"][1]is True:
pkmon.condition["Poison"][1]= False
elif self.name=="Parlyz Heal":
if pkmon.condition["Para"][1]is True:
pkmon.condition["Para"][1]= False
elif self.name=="Parlyz Heal":
if pkmon.condition["Para"][1]is True:
pkmon.condition["Para"][1]= False
elif self.name=="Awakening":
if pkmon.condition["Sleep"][1]is True:
pkmon.condition["Sleep"][1]= False
elif self.name=="Burn Heal":
if pkmon.condition["Burn"][1]is True:
pkmon.condition["Burn"][1]= False
elif self.name=="Ice Heal":
if pkmon.condition["Freeze"][1]is True:
pkmon.condition["Freeze"][1]= False
return ITEM_USED
elif self.getType()=="Pokeball" and other is not None and other.getType() == TYPE_WILD:
# self.owner.pokemon.append(otherpkmon)
# return ITEM_CAUGHT
if len(self.owner.pokemon) >= 6:
return
if self.name=="Pokeball":
x=CatchFormula(otherpkmon.maxhp,otherpkmon.hp,otherpkmon.cChance,1,1,"BComparison2.txt")
if x[0]=="Caught":
self.owner.pokemon.append(otherpkmon)
if otherpkmon.tID in self.owner.info["catch_record"]:
count, last_tme = self.owner.info["catch_record"][otherpkmon.tID]
self.owner.info["catch_record"][otherpkmon.tID] = count+1, time.time()
else:
self.owner.info["catch_record"][otherpkmon.tID] = 1, time.time()
return ITEM_CAUGHT
elif x[0]=="Escaped":
pass
elif self.name=="Great Ball":
x=CatchFormula(otherpkmon.maxhp,otherpkmon.hp,otherpkmon.cChance,1.5,1,"BComparison2.txt")
if x[0]=="Caught":
self.owner.pokemon.append(otherpkmon)
if otherpkmon.tID in self.owner.info["catch_record"]:
count, last_tme = self.owner.info["catch_record"][otherpkmon.tID]
self.owner.info["catch_record"][otherpkmon.tID] = count+1, time.time()
else:
self.owner.info["catch_record"][otherpkmon.tID] = 1, time.time()
return ITEM_CAUGHT
elif x[0]=="Escaped":
pass
elif self.name=="Ultra Ball":
x=CatchFormula (otherpkmon.maxhp,otherpkmon.hp,otherpkmon.cChance,2,1,"BComparison2.txt")
if x[0]=="Caught":
self.owner.pokemon.append(otherpkmon)
if otherpkmon.tID in self.owner.info["catch_record"]:
count, last_tme = self.owner.info["catch_record"][otherpkmon.tID]
self.owner.info["catch_record"][otherpkmon.tID] = count+1, time.time()
else:
self.owner.info["catch_record"][otherpkmon.tID] = 1, time.time()
return ITEM_CAUGHT
elif x=="Escaped":
pass
elif self.name=="Master Ball" and other is not None and other.getType() == TYPE_WILD:
x=CatchFormula(otherpkmon.maxhp,otherpkmon.hp,otherpkmon.cChance,255,1,"BComparison2.txt")
if x[0]=="Caught":
self.owner.pokemon.append(otherpkmon)
if otherpkmon.tID in self.owner.info["catch_record"]:
count, last_tme = self.owner.info["catch_record"][otherpkmon.tID]
self.owner.info["catch_record"][otherpkmon.tID] = count+1, time.time()
else:
self.owner.info["catch_record"][otherpkmon.tID] = 1, time.time()
return ITEM_CAUGHT
elif x[0]=="Escaped":
pass
return ITEM_USED
return ITEM_NOTUSED
gLeader={
1:[["Brock"],[Pokemon(74,13*500),Pokemon(95,15*500)],["Boulder_Badge"]],
2:[["Misty"],[Pokemon(120,20*500),Pokemon(121,22*500)],["Cascade_Badge"]],
3:[["Lt. Surge"],[Pokemon(25,28*500),Pokemon(29,31*500),Pokemon(26,31*500)],["Thunder_Badge"]],
4:[["Erika"],[Pokemon(71,35*500),Pokemon(114,34*500),Pokemon(45,36*500)],["Rainbow_Badge"]],
5:[["Sabrina"],[Pokemon(64,39*500),Pokemon(122,42*500),Pokemon(49,40*500)],["Marsh_Badge"]],
6:[["Koga"],[Pokemon(109,48*500),Pokemon(89,50*500),Pokemon(110,55*500)],["Soul_Badge"]],
7:[["Blaine"],[Pokemon(58,58*500),Pokemon(77,57*500),Pokemon(78,60*500),Pokemon(126,61*500),Pokemon(59,64*500)],["Volcano_Badge"]],
8:[["Giovanni"],[Pokemon(111,69*500),Pokemon(112,72*500),Pokemon(51,70*500),Pokemon(31,78*500),Pokemon(34,78*500),Pokemon(112,80*500)],["Earth_Badge"]]
}
player=Player([],[Item("Pokeball", 5, 1000, 100)],"name", 0, 11000)
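# Minimal usage sketch (hypothetical values, kept commented like the block below
# so nothing extra runs at import time): give the demo player a Pokemon and a
# Potion, then heal it through the backpack.
# demo_poke = Pokemon(129, 2500) # level 5 Magikarp
# demo_poke.hp = 1 # pretend it has taken damage
# player.addPokemon(demo_poke)
# added, potion = player.addItem(Item("Potion", 1, 300, 150))
# player.useItem(potion.id) # restores up to 20 HP, capped at max HP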
# for i in player.backpack:
# if player.backpack[i].name == "Master Ball":
# result = i
# break
# print(player.useItem(result, other))
# print(player.pokemon)
|
chen-charles/PyPokemon
|
PokemonFinalProjectV1/Combinedv2.py
|
Python
|
gpl-3.0
| 114,361
|
[
"BLAST"
] |
03a1b863918233b91a545d106e14677bc03b11b20196c5d465e3f3e0f49153f5
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Copyright (C) 2020 Stoq Tecnologia <http://www.stoq.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., or visit: http://www.gnu.org/.
#
# Author(s): Stoq Team <dev@stoq.com.br>
#
import gettext
import glob
import importlib
import locale
import os
import platform
import sys
import pkg_resources
version = "2.15.2"
# legacy versioning
__version__ = (1, 6, 0, "beta1")
version_str = '.'.join(str(i) for i in __version__)
def enable_translation(domain, root='..', enable_global=None):
installed = importlib.util.find_spec(domain)
if installed and pkg_resources.resource_exists(domain, 'locale'):
localedir = pkg_resources.resource_filename(domain, 'locale')
elif installed:
localedir = None
else:
localedir = os.path.join(root, 'locale')
gettext.bindtextdomain(domain, localedir)
# For libglade, but only on non-win32 systems
if hasattr(locale, 'bindtextdomain'):
locale.bindtextdomain(domain, localedir)
gettext.bind_textdomain_codeset(domain, 'utf-8')
if enable_global:
gettext.textdomain(domain)
# For libglade, but only on non-win32 systems
if hasattr(locale, 'textdomain'):
locale.textdomain(domain)
if platform.system() == 'Windows':
from ctypes import cdll
libintl = cdll.LoadLibrary("libintl-8.dll")
libintl.bindtextdomain(domain, localedir)
libintl.bind_textdomain_codeset(domain, 'UTF-8')
if enable_global:
libintl.textdomain(domain)
del libintl
enable_translation('stoqserver', enable_global=True)
def activate_virtualenv():
venv_path = glob.glob('/usr/lib/stoq-virtualenv/lib/python*/site-packages/')
if not venv_path:
return
sys.path = [venv_path[0]] + sys.path
|
stoq/stoq-server
|
stoqserver/__init__.py
|
Python
|
gpl-2.0
| 2,477
|
[
"VisIt"
] |
69e78d7341ddc11a4ce6a875ee9806901cdcd0d7aadc129ff3ea402bfb3ca9b9
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski", "Rob Knight", "Antonio Gonzalez Pena",
"Catherine Lozupone", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "antgonza@gmail.com"
from os.path import exists, isdir, splitext, join
from os import makedirs, listdir
from qiime.util import (parse_command_line_parameters, get_options_lookup,
make_option)
from qiime.principal_coordinates import pcoa
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = "Principal Coordinates Analysis (PCoA)"
script_info['script_description'] = ("Principal Coordinate Analysis (PCoA) is "
"commonly used to compare groups of "
"samples based on phylogenetic or "
"count-based distance metrics (see "
"section on beta_diversity.py).")
script_info['script_usage'] = [
("PCoA (Single File)",
"For this script, the user supplies a distance matrix (i.e. resulting "
"file from beta_diversity.py), along with the output filename (e.g. "
"beta_div_coords.txt), as follows:",
"%prog -i beta_div.txt -o beta_div_coords.txt"),
("PCoA (Multiple Files):",
"The script also functions in batch mode if a folder is supplied as input"
" (e.g. from beta_diversity.py run in batch). No other files should be "
"present in the input folder - only the distance matrix files to be "
"analyzed. This script operates on every distance matrix file in the "
"input directory and creates a corresponding principal coordinates "
"results file in the output directory, e.g.:",
"%prog -i beta_div_weighted_unifrac/ -o beta_div_weighted_pcoa_results/")
]
script_info['output_description'] = ("The resulting output file consists of "
"the principal coordinate (PC) axes "
"(columns) for each sample (rows). "
"Pairs of PCs can then be graphed to view"
" the relationships between samples. The "
"bottom of the output file contains the "
"eigenvalues and % variation explained "
"for each PC. For more information of the"
" file format, check the "
"OrdinationResults class in the "
"scikit-bio package "
"(http://scikit-bio.org/)")
script_info['required_options'] = [
make_option('-i', '--input_path', type='existing_path',
help='path to the input distance matrix file(s) (i.e., the '
'output from beta_diversity.py). Is a directory for '
'batch processing and a filename for a single file '
'operation.'),
make_option('-o', '--output_path', type='new_path',
help='output path. directory for batch processing, filename '
'for single file operation'),
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
input_path = opts.input_path
output_path = opts.output_path
if isdir(input_path):
# Run PCoA on all distance matrices in the input dir
# Create the output directory if it does not exists
if not exists(output_path):
makedirs(output_path)
# Get all the filenames present in the input directory
file_names = [fname for fname in listdir(input_path)
if not (fname.startswith('.') or isdir(fname))]
# Loop through all the input files
for fname in file_names:
# Get the path to the input distance matrix
infile = join(input_path, fname)
# Run PCoA on the input distance matrix
with open(infile, 'U') as lines:
pcoa_scores = pcoa(lines)
# Store the PCoA results on the output directory
base_fname, ext = splitext(fname)
out_file = join(output_path, 'pcoa_%s.txt' % base_fname)
pcoa_scores.write(out_file)
else:
# Run PCoA on the input distance matrix
with open(input_path, 'U') as f:
pcoa_scores = pcoa(f)
# Store the results in the output file
pcoa_scores.write(output_path)
if __name__ == "__main__":
main()
|
josenavas/qiime
|
scripts/principal_coordinates.py
|
Python
|
gpl-2.0
| 4,906
|
[
"scikit-bio"
] |
cd5660f3641b46a73511e1ffbf3ae3d70e269dc62713bfdf13d1e6d8890780be
|
# -*- coding: utf-8 -*-
"""
Unit tests for the OBSolve class.
Thomas Ogden <t@ogden.eu>
"""
import os
import unittest
import numpy as np
from maxwellbloch import ob_solve, t_funcs
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
JSON_STR_02 = (
'{'
' "atom": {'
' "decays": ['
' { "channels": [[0,1], [1,2]], '
' "rate": 1.0'
' }'
' ],'
' "energies": [],'
' "fields": ['
' {'
' "coupled_levels": ['
' [0, 1]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": true,'
' "label": "probe",'
' "rabi_freq": 5.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' },'
' {'
' "coupled_levels": ['
' [1, 2]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": false,'
' "label": "coupling",'
' "rabi_freq": 10.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' }'
' ],'
' "num_states": 3'
' },'
' "t_min": 0.0,'
' "t_max": 1.0,'
' "t_steps": 100,'
' "method": "mesolve",'
' "opts": {}'
'}'
)
class TestSetFieldRabiTFunc(unittest.TestCase):
""" Test setting custom Rabi frequency time functions. """
def test_set_field_rabi_t_func_1(self):
""" Test that a custom double pulse Rabi freq time functions can be
set.
"""
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
two_pulse_t_func = lambda t, args: (t_funcs.gaussian(0)(t, args) +
t_funcs.gaussian(1)(t, args))
two_pulse_t_args = {"ampl_0": 1.0, "centre_0": 0.0, "fwhm_0": 0.1,
"ampl_1": 2.0, "centre_1": 0.5, "fwhm_1": 0.1, }
ob_solve_02.set_field_rabi_freq_t_func(0, two_pulse_t_func)
ob_solve_02.set_field_rabi_freq_t_args(0, two_pulse_t_args)
field_0 = ob_solve_02.atom.fields[0]
self.assertAlmostEqual(field_0.rabi_freq_t_func(0.0,
field_0.rabi_freq_t_args), 1.0)
self.assertAlmostEqual(field_0.rabi_freq_t_func(0.5,
field_0.rabi_freq_t_args), 2.0)
self.assertAlmostEqual(field_0.rabi_freq_t_func(1.0,
field_0.rabi_freq_t_args), 0.0)
class TestSolve(unittest.TestCase):
def test_two_level_rabi_oscillations(self):
""" Solve the optical Bloch equations for the two-level atom.
Notes:
See https://en.wikipedia.org/wiki/Rabi_cycle
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100)
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is known, we should have Rabi cycling at the frequency.
known_0 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
known_1 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
# If you want to take a look
# import matplotlib.pyplot as plt
# plt.plot(obs.tlist, pop_0)
# plt.plot(obs.tlist, known_0, ls='dashed')
# plt.plot(obs.tlist, pop_1)
# plt.plot(obs.tlist, known_1, ls='dashed')
# plt.show()
def test_two_level_with_opts(self):
""" Same as test_two_level_rabi_oscillations() but with opts set such
that the tolerances are lower. The results will be less
accurate.
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2,
"initial_state": [1., 0.]}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100, opts={'atol': 1e-6, 'rtol': 1e-4})
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is known, we should have Rabi cycling at the frequency.
known_0 = np.cos(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
known_1 = np.sin(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
# Compared with test_two_level_rabi_oscillations() we can only assert
# a lower tolerance to the known solution.
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-3, atol=1.e-3))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-3, atol=1.e-3))
# If you want to take a look
# import matplotlib.pyplot as plt
# plt.plot(obs.tlist, pop_0)
# plt.plot(obs.tlist, known_0, ls='dashed')
# plt.plot(obs.tlist, pop_1)
# plt.plot(obs.tlist, known_1, ls='dashed')
# plt.show()
def test_two_level_with_inital_state(self):
""" Same as test_two_level_rabi_oscillations() but with the initial
state set so that the population starts in the upper level.
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2,
"initial_state": [0., 1.]}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100)
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is as test_two_level_rabi_oscillations() but swapped
known_0 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
known_1 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
def test_two_two_fields(self):
""" Test a two-level atom addressed by multiple fields.
Notes:
- Test for bug in #159, where multiple fields coupling the same
levels isn't working.
- The first square pi-pulse drives all the population to the excited
state, the second square pi-pulse drives all pop back to ground.
- Before fix, only the second field is driving the atoms.
"""
json_path = os.path.join(JSON_DIR, "obs-two-two-fields.json")
obs = ob_solve.OBSolve().from_json(json_path)
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# All population should start in the ground state
self.assertAlmostEqual(pop_0[0], 1.0, places=5)
self.assertAlmostEqual(pop_1[0], 0.0, places=5)
# The first pi-pulse between t = 0.2 and t = 0.3 should drive all the
# population to the excited state
self.assertAlmostEqual(pop_0[len(pop_0)//2], 0.0, places=5)
self.assertAlmostEqual(pop_1[len(pop_0)//2], 1.0, places=5)
# The second pi-pulse between t = 0.6 and t = 0.7 should drive all the
# population back to the ground state
self.assertAlmostEqual(pop_0[-1], 1.0, places=5)
self.assertAlmostEqual(pop_1[-1], 0.0, places=5)
# If you want to take a look
# import matplotlib.pyplot as plt
# plt.plot(obs.tlist, pop_0)
# plt.plot(obs.tlist, pop_1)
# plt.show()
def test_vee_cw_weak_sech_2pi(self):
""" Test a three-level vee config atom where the probe transition is
addressed by a weak cw and the drive is a sech pulse.
Notes:
- Test for bug in #222, where the `t_args can contain ampl or n_pi,
not both` exception is raised even though those args are in
different fields.
- Bug is due to field_idxs not being set correctly, see also #159.
"""
json_path = os.path.join(JSON_DIR, "obs-vee-cw-weak-sech-2pi.json")
obs = ob_solve.OBSolve().from_json(json_path)
# Test that solve does not throw any exceptions.
obs.solve()
class TestJSON(unittest.TestCase):
def test_to_from_json_str_00(self):
ob_solve_00 = ob_solve.OBSolve()
ob_solve_01 = ob_solve.OBSolve.from_json_str(ob_solve_00.to_json_str())
self.assertEqual(ob_solve_00.to_json_str(), ob_solve_01.to_json_str())
def test_from_json_str(self):
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
self.assertEqual(ob_solve_02.t_min, 0.0)
self.assertEqual(ob_solve_02.t_max, 1.0)
self.assertEqual(ob_solve_02.t_steps, 100)
self.assertEqual(ob_solve_02.method, "mesolve")
def test_to_from_json_str_03(self):
json_path = os.path.join(JSON_DIR, "ob_solve_03.json")
obs = ob_solve.OBSolve().from_json(json_path)
obs_test = ob_solve.OBSolve.from_json_str(obs.to_json_str())
self.assertEqual(obs.to_json_str(), obs_test.to_json_str())
def test_to_from_json(self):
import os
filepath = "test_ob_solve_02.json"
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
ob_solve_02.to_json(filepath)
ob_solve_03 = ob_solve.OBSolve().from_json(filepath)
os.remove(filepath)
self.assertEqual(ob_solve_02.to_json_str(),
ob_solve_03.to_json_str())
class TestSaveLoad(unittest.TestCase):
""" Tests for the OBSolve save and load methods."""
def test_save_load_01(self):
""" Solve a basic OBSolve problem. Save the results to file. Set the
results in the OBSolve object to null. Load the results from file
and check that they match the original values.
"""
json_path = os.path.join(JSON_DIR, "ob_solve_02.json")
ob_solve_02 = ob_solve.OBSolve().from_json(json_path)
states_t = ob_solve_02.solve()
states_t_loaded = ob_solve_02.solve(recalc=False)
self.assertTrue((states_t == states_t_loaded).all())
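# Hypothetical entry point (not in the original file): lets the tests above be
# run directly with the standard library unittest runner.
if __name__ == "__main__":
    unittest.main()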
| tommyogden/maxwellbloch | maxwellbloch/tests/test_ob_solve.py | Python | mit | 10,518 | ["Gaussian"] | e983fb6af514811c115d0d7bbe6b9f1f70590814edef154729c88ae6d51fc01e |
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, LetNode=object,
LetRefNode=object, TreeFragment=object, EncodedString=object,
error=object, warning=object, copy=object, _unicode=object)
import copy
import hashlib
from . import PyrexTypes
from . import Naming
from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
from . import Errors
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
from .UtilNodes import LetNode, LetRefNode
from .TreeFragment import TreeFragment
from .StringEncoding import EncodedString, _unicode
from .Errors import error, warning, CompileError, InternalError
from .Code import UtilityCode
class NameNodeCollector(TreeVisitor):
"""Collect all NameNodes of a (sub-)tree in the ``name_nodes``
attribute.
"""
def __init__(self):
super(NameNodeCollector, self).__init__()
self.name_nodes = []
def visit_NameNode(self, node):
self.name_nodes.append(node)
def visit_Node(self, node):
self._visitchildren(node, None)
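# Usage sketch (illustrative, not part of the original source): walk a subtree
# and inspect the names it references, e.g.
#     collector = NameNodeCollector()
#     collector.visitchildren(some_node)
#     referenced = [name_node.name for name_node in collector.name_nodes]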
class SkipDeclarations(object):
"""
Variable and function declarations can often have a deep tree structure,
and yet most transformations don't need to descend to this depth.
Declaration nodes are removed after AnalyseDeclarationsTransform, so there
is no need to use this for transformations after that point.
"""
def visit_CTypeDefNode(self, node):
return node
def visit_CVarDefNode(self, node):
return node
def visit_CDeclaratorNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return node
def visit_CEnumDefNode(self, node):
return node
def visit_CStructOrUnionDefNode(self, node):
return node
class NormalizeTree(CythonTransform):
"""
This transform fixes up a few things after parsing
in order to make the parse tree more suitable for
transforms.
a) After parsing, blocks with only one statement will
be represented by that statement, not by a StatListNode.
When doing transforms this is annoying and inconsistent,
as one cannot in general remove a statement in a consistent
way and so on. This transform wraps any single statements
in a StatListNode containing a single statement.
b) The PassStatNode is a noop and serves no purpose beyond
plugging such one-statement blocks; i.e., once parsed a
"pass" can just as well be represented using an empty
StatListNode. This means fewer special cases to worry about
in subsequent transforms (one always checks to see if a
StatListNode has no children to see if the block is empty).
"""
def __init__(self, context):
super(NormalizeTree, self).__init__(context)
self.is_in_statlist = False
self.is_in_expr = False
def visit_ExprNode(self, node):
stacktmp = self.is_in_expr
self.is_in_expr = True
self.visitchildren(node)
self.is_in_expr = stacktmp
return node
def visit_StatNode(self, node, is_listcontainer=False):
stacktmp = self.is_in_statlist
self.is_in_statlist = is_listcontainer
self.visitchildren(node)
self.is_in_statlist = stacktmp
if not self.is_in_statlist and not self.is_in_expr:
return Nodes.StatListNode(pos=node.pos, stats=[node])
else:
return node
def visit_StatListNode(self, node):
self.is_in_statlist = True
self.visitchildren(node)
self.is_in_statlist = False
return node
def visit_ParallelAssignmentNode(self, node):
return self.visit_StatNode(node, True)
def visit_CEnumDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_CStructOrUnionDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_PassStatNode(self, node):
"""Eliminate PassStatNode"""
if not self.is_in_statlist:
return Nodes.StatListNode(pos=node.pos, stats=[])
else:
return []
def visit_ExprStatNode(self, node):
"""Eliminate useless string literals"""
if node.expr.is_string_literal:
return self.visit_PassStatNode(node)
else:
return self.visit_StatNode(node)
def visit_CDeclaratorNode(self, node):
return node
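# Illustrative sketch of the normalisation above (not from the original
# source): a single-statement block such as the body of "if x: f()" is wrapped
# as StatListNode(stats=[ExprStatNode(f())]), while a bare "pass" body becomes
# an empty StatListNode(stats=[]), so later transforms only ever deal with
# statement lists.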
class PostParseError(CompileError): pass
# error strings checked by unit tests, so define them
ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
class PostParse(ScopeTrackingTransform):
"""
Basic interpretation of the parse tree, as well as validity
checking that can be done on a very basic level on the parse
tree (while still not being a problem with the basic syntax,
as such).
Specifically:
- Default values to cdef assignments are turned into single
assignments following the declaration (everywhere but in class
bodies, where they raise a compile error)
- Interpret some node structures into Python runtime values.
Some nodes take compile-time arguments (currently:
TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
which should be interpreted. This happens in a general way
and other steps should be taken to ensure validity.
Type arguments cannot be interpreted in this way.
- For __cythonbufferdefaults__ the arguments are checked for
validity.
TemplatedTypeNode has its directives interpreted:
Any first positional argument goes into the "dtype" attribute,
any "ndim" keyword argument goes into the "ndim" attribute and
so on. Also it is checked that the directive combination is valid.
- __cythonbufferdefaults__ attributes are parsed and put into the
type information.
Note: Currently Parsing.py does a lot of interpretation and
reorganization that can be refactored into this transform
if a more pure Abstract Syntax Tree is wanted.
"""
def __init__(self, context):
super(PostParse, self).__init__(context)
self.specialattribute_handlers = {
'__cythonbufferdefaults__' : self.handle_bufferdefaults
}
def visit_LambdaNode(self, node):
# unpack a lambda expression into the corresponding DefNode
collector = YieldNodeCollector()
collector.visitchildren(node.result_expr)
if collector.has_yield or collector.has_await or isinstance(node.result_expr, ExprNodes.YieldExprNode):
body = Nodes.ExprStatNode(
node.result_expr.pos, expr=node.result_expr)
else:
body = Nodes.ReturnStatNode(
node.result_expr.pos, value=node.result_expr)
node.def_node = Nodes.DefNode(
node.pos, name=node.name,
args=node.args, star_arg=node.star_arg,
starstar_arg=node.starstar_arg,
body=body, doc=None)
self.visitchildren(node)
return node
def visit_GeneratorExpressionNode(self, node):
# unpack a generator expression into the corresponding DefNode
collector = YieldNodeCollector()
collector.visitchildren(node.loop)
node.def_node = Nodes.DefNode(
node.pos, name=node.name, doc=None,
args=[], star_arg=None, starstar_arg=None,
body=node.loop, is_async_def=collector.has_await)
self.visitchildren(node)
return node
def visit_ComprehensionNode(self, node):
# enforce local scope also in Py2 for async generators (seriously, that's a Py3.6 feature...)
if not node.has_local_scope:
collector = YieldNodeCollector()
collector.visitchildren(node.loop)
if collector.has_await:
node.has_local_scope = True
self.visitchildren(node)
return node
# cdef variables
def handle_bufferdefaults(self, decl):
if not isinstance(decl.default, ExprNodes.DictNode):
raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
self.scope_node.buffer_defaults_node = decl.default
self.scope_node.buffer_defaults_pos = decl.pos
def visit_CVarDefNode(self, node):
# This assumes only plain names and pointers are assignable on
# declaration. Also, it makes use of the fact that a cdef decl
# must appear before the first use, so we don't have to deal with
# "i = 3; cdef int i = i" and can simply move the nodes around.
try:
self.visitchildren(node)
stats = [node]
newdecls = []
for decl in node.declarators:
declbase = decl
while isinstance(declbase, Nodes.CPtrDeclaratorNode):
declbase = declbase.base
if isinstance(declbase, Nodes.CNameDeclaratorNode):
if declbase.default is not None:
if self.scope_type in ('cclass', 'pyclass', 'struct'):
if isinstance(self.scope_node, Nodes.CClassDefNode):
handler = self.specialattribute_handlers.get(decl.name)
if handler:
if decl is not declbase:
raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
handler(decl)
continue # Remove declaration
raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
first_assignment = self.scope_type != 'module'
stats.append(Nodes.SingleAssignmentNode(node.pos,
lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
rhs=declbase.default, first=first_assignment))
declbase.default = None
newdecls.append(decl)
node.declarators = newdecls
return stats
except PostParseError as e:
# An error in a cdef clause is ok, simply remove the declaration
# and try to move on to report more errors
self.context.nonfatal_error(e)
return None
# Split parallel assignments (a,b = b,a) into separate partial
# assignments that are executed rhs-first using temps. This
# restructuring must be applied before type analysis so that known
# types on rhs and lhs can be matched directly. It is required in
# the case that the types cannot be coerced to a Python type in
# order to assign from a tuple.
def visit_SingleAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, [node.lhs, node.rhs])
def visit_CascadedAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, node.lhs_list + [node.rhs])
def _visit_assignment_node(self, node, expr_list):
"""Flatten parallel assignments into separate single
assignments or cascaded assignments.
"""
if sum([ 1 for expr in expr_list
if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
# no parallel assignments => nothing to do
return node
expr_list_list = []
flatten_parallel_assignments(expr_list, expr_list_list)
temp_refs = []
eliminate_rhs_duplicates(expr_list_list, temp_refs)
nodes = []
for expr_list in expr_list_list:
lhs_list = expr_list[:-1]
rhs = expr_list[-1]
if len(lhs_list) == 1:
node = Nodes.SingleAssignmentNode(rhs.pos,
lhs = lhs_list[0], rhs = rhs)
else:
node = Nodes.CascadedAssignmentNode(rhs.pos,
lhs_list = lhs_list, rhs = rhs)
nodes.append(node)
if len(nodes) == 1:
assign_node = nodes[0]
else:
assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)
if temp_refs:
duplicates_and_temps = [ (temp.expression, temp)
for temp in temp_refs ]
sort_common_subsequences(duplicates_and_temps)
for _, temp_ref in duplicates_and_temps[::-1]:
assign_node = LetNode(temp_ref, assign_node)
return assign_node
def _flatten_sequence(self, seq, result):
for arg in seq.args:
if arg.is_sequence_constructor:
self._flatten_sequence(arg, result)
else:
result.append(arg)
return result
def visit_DelStatNode(self, node):
self.visitchildren(node)
node.args = self._flatten_sequence(node, [])
return node
def visit_ExceptClauseNode(self, node):
if node.is_except_as:
# except-as must delete NameNode target at the end
del_target = Nodes.DelStatNode(
node.pos,
args=[ExprNodes.NameNode(
node.target.pos, name=node.target.name)],
ignore_nonexisting=True)
node.body = Nodes.StatListNode(
node.pos,
stats=[Nodes.TryFinallyStatNode(
node.pos,
body=node.body,
finally_clause=Nodes.StatListNode(
node.pos,
stats=[del_target]))])
self.visitchildren(node)
return node
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
"""Replace rhs items by LetRefNodes if they appear more than once.
Creates a sequence of LetRefNodes that set up the required temps
and appends them to ref_node_sequence. The input list is modified
in-place.
"""
seen_nodes = set()
ref_nodes = {}
def find_duplicates(node):
if node.is_literal or node.is_name:
# no need to replace those; can't include attributes here
# as their access is not necessarily side-effect free
return
if node in seen_nodes:
if node not in ref_nodes:
ref_node = LetRefNode(node)
ref_nodes[node] = ref_node
ref_node_sequence.append(ref_node)
else:
seen_nodes.add(node)
if node.is_sequence_constructor:
for item in node.args:
find_duplicates(item)
for expr_list in expr_list_list:
rhs = expr_list[-1]
find_duplicates(rhs)
if not ref_nodes:
return
def substitute_nodes(node):
if node in ref_nodes:
return ref_nodes[node]
elif node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
return node
# replace nodes inside of the common subexpressions
for node in ref_nodes:
if node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
# replace common subexpressions on all rhs items
for expr_list in expr_list_list:
expr_list[-1] = substitute_nodes(expr_list[-1])
def sort_common_subsequences(items):
"""Sort items/subsequences so that all items and subsequences that
an item contains appear before the item itself. This is needed
because each rhs item must only be evaluated once, so its value
must be evaluated first and then reused when packing sequences
that contain it.
This implies a partial order, and the sort must be stable to
preserve the original order as much as possible, so we use a
simple insertion sort (which is very fast for short sequences, the
normal case in practice).
"""
def contains(seq, x):
for item in seq:
if item is x:
return True
elif item.is_sequence_constructor and contains(item.args, x):
return True
return False
def lower_than(a,b):
return b.is_sequence_constructor and contains(b.args, a)
for pos, item in enumerate(items):
key = item[1] # the ResultRefNode which has already been injected into the sequences
new_pos = pos
for i in range(pos-1, -1, -1):
if lower_than(key, items[i][0]):
new_pos = i
if new_pos != pos:
for i in range(pos, new_pos, -1):
items[i] = items[i-1]
items[new_pos] = item
def unpack_string_to_character_literals(literal):
chars = []
pos = literal.pos
stype = literal.__class__
sval = literal.value
sval_type = sval.__class__
for char in sval:
cval = sval_type(char)
chars.append(stype(pos, value=cval, constant_result=cval))
return chars
def flatten_parallel_assignments(input, output):
# The input is a list of expression nodes, representing the LHSs
# and RHS of one (possibly cascaded) assignment statement. For
# sequence constructors, rearranges the matching parts of both
# sides into a list of equivalent assignments between the
# individual elements. This transformation is applied
# recursively, so that nested structures get matched as well.
rhs = input[-1]
if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
output.append(input)
return
complete_assignments = []
if rhs.is_sequence_constructor:
rhs_args = rhs.args
elif rhs.is_string_literal:
rhs_args = unpack_string_to_character_literals(rhs)
rhs_size = len(rhs_args)
lhs_targets = [[] for _ in range(rhs_size)]
starred_assignments = []
for lhs in input[:-1]:
if not lhs.is_sequence_constructor:
if lhs.is_starred:
error(lhs.pos, "starred assignment target must be in a list or tuple")
complete_assignments.append(lhs)
continue
lhs_size = len(lhs.args)
starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
if starred_targets > 1:
error(lhs.pos, "more than 1 starred expression in assignment")
output.append([lhs,rhs])
continue
elif lhs_size - starred_targets > rhs_size:
error(lhs.pos, "need more than %d value%s to unpack"
% (rhs_size, (rhs_size != 1) and 's' or ''))
output.append([lhs,rhs])
continue
elif starred_targets:
map_starred_assignment(lhs_targets, starred_assignments,
lhs.args, rhs_args)
elif lhs_size < rhs_size:
error(lhs.pos, "too many values to unpack (expected %d, got %d)"
% (lhs_size, rhs_size))
output.append([lhs,rhs])
continue
else:
for targets, expr in zip(lhs_targets, lhs.args):
targets.append(expr)
if complete_assignments:
complete_assignments.append(rhs)
output.append(complete_assignments)
# recursively flatten partial assignments
for cascade, rhs in zip(lhs_targets, rhs_args):
if cascade:
cascade.append(rhs)
flatten_parallel_assignments(cascade, output)
# recursively flatten starred assignments
for cascade in starred_assignments:
if cascade[0].is_sequence_constructor:
flatten_parallel_assignments(cascade, output)
else:
output.append(cascade)
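# Illustrative sketch (not from the original source) of what
# flatten_parallel_assignments() produces: for "a, b = x, y" the input
# [TupleNode(a, b), TupleNode(x, y)] is flattened into the output
# [[a, x], [b, y]], i.e. one single assignment per matched element, and the
# same matching is applied recursively to nested tuples.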
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
# Appends the fixed-position LHS targets to the target list that
# appear left and right of the starred argument.
#
# The starred_assignments list receives a new tuple
# (lhs_target, rhs_values_list) that maps the remaining arguments
# (those that match the starred target) to a list.
# left side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
if expr.is_starred:
starred = i
lhs_remaining = len(lhs_args) - i - 1
break
targets.append(expr)
else:
raise InternalError("no starred arg found when splitting starred assignment")
# right side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
lhs_args[starred + 1:])):
targets.append(expr)
# the starred target itself, must be assigned a (potentially empty) list
target = lhs_args[starred].target # unpack starred node
starred_rhs = rhs_args[starred:]
if lhs_remaining:
starred_rhs = starred_rhs[:-lhs_remaining]
if starred_rhs:
pos = starred_rhs[0].pos
else:
pos = target.pos
starred_assignments.append([
target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class PxdPostParse(CythonTransform, SkipDeclarations):
"""
Basic interpretation/validity checking that should only be
done on pxd trees.
A lot of this checking currently happens in the parser; but
what is listed below happens here.
- "def" functions are let through only if they fill the
getbuffer/releasebuffer slots
- cdef functions are let through only if they are on the
top level and are declared "inline"
"""
ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"
def __call__(self, node):
self.scope_type = 'pxd'
return super(PxdPostParse, self).__call__(node)
def visit_CClassDefNode(self, node):
old = self.scope_type
self.scope_type = 'cclass'
self.visitchildren(node)
self.scope_type = old
return node
def visit_FuncDefNode(self, node):
# FuncDefNodes always come with an implementation (without
# an implementation they are CVarDefNodes...)
err = self.ERR_INLINE_ONLY
if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
and node.name in ('__getbuffer__', '__releasebuffer__')):
err = None # allow these slots
if isinstance(node, Nodes.CFuncDefNode):
if (u'inline' in node.modifiers and
self.scope_type in ('pxd', 'cclass')):
node.inline_in_pxd = True
if node.visibility != 'private':
err = self.ERR_NOGO_WITH_INLINE % node.visibility
elif node.api:
err = self.ERR_NOGO_WITH_INLINE % 'api'
else:
err = None # allow inline function
else:
err = self.ERR_INLINE_ONLY
if err:
self.context.nonfatal_error(PostParseError(node.pos, err))
return None
else:
return node
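# Illustrative sketch (not from the original source) of the pxd rules enforced
# above: in a .pxd file a declaration such as
#     cdef inline int twice(int x): return 2 * x
# is accepted, whereas a module-level "def helper(): ..." is rejected unless it
# fills the __getbuffer__/__releasebuffer__ slots of a cdef class.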
class TrackNumpyAttributes(VisitorTransform, SkipDeclarations):
# TODO: Make name handling as good as in InterpretCompilerDirectives() below - probably best to merge the two.
def __init__(self):
super(TrackNumpyAttributes, self).__init__()
self.numpy_module_names = set()
def visit_CImportStatNode(self, node):
if node.module_name == u"numpy":
self.numpy_module_names.add(node.as_name or u"numpy")
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
if node.obj.is_name and node.obj.name in self.numpy_module_names:
node.is_numpy_attribute = True
return node
visit_Node = VisitorTransform.recurse_to_children
class InterpretCompilerDirectives(CythonTransform):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
- Command-line arguments overriding these
- @cython.directivename decorators
- with cython.directivename: statements
This transform is responsible for interpreting these various sources
and storing the directives in two ways:
- Set the directives attribute of the ModuleNode for global directives.
- Use a CompilerDirectivesNode to override directives for a subtree.
(The first one is primarily to avoid modifying the tree
structure, so that ModuleNode stays on top.)
The directives are stored in dictionaries from name to value in effect.
Each such dictionary is always filled in for all possible directives,
using default values where no value is given by the user.
The available directives are controlled in Options.py.
Note that we have to run this prior to analysis, and so some minor
duplication of functionality has to occur: We manually track cimports
and which names the "cython" module may have been imported to.
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
'operator.typeid' : ExprNodes.TypeidNode,
# For backwards compatibility.
'address': ExprNodes.AmpersandNode,
}
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
special_methods = set(['declare', 'union', 'struct', 'typedef',
'sizeof', 'cast', 'pointer', 'compiled',
'NULL', 'fused_type', 'parallel'])
special_methods.update(unop_method_nodes)
valid_parallel_directives = set([
"parallel",
"prange",
"threadid",
#"threadsavailable",
])
def __init__(self, context, compilation_directive_defaults):
super(InterpretCompilerDirectives, self).__init__(context)
self.cython_module_names = set()
self.directive_names = {'staticmethod': 'staticmethod'}
self.parallel_directives = {}
directives = copy.deepcopy(Options.get_directive_defaults())
for key, value in compilation_directive_defaults.items():
directives[_unicode(key)] = copy.deepcopy(value)
self.directives = directives
def check_directive_scope(self, pos, directive, scope):
legal_scopes = Options.directive_scopes.get(directive, None)
if legal_scopes and scope not in legal_scopes:
self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
'is not allowed in %s scope' % (directive, scope)))
return False
else:
if directive not in Options.directive_types:
error(pos, "Invalid directive: '%s'." % (directive,))
return True
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key in sorted(node.directive_comments):
if not self.check_directive_scope(node.pos, key, 'module'):
self.wrong_scope_error(node.pos, key, 'module')
del node.directive_comments[key]
self.module_scope = node.scope
self.directives.update(node.directive_comments)
node.directives = self.directives
node.parallel_directives = self.parallel_directives
self.visitchildren(node)
node.cython_module_names = self.cython_module_names
return node
# The following four functions track imports and cimports that
# begin with "cython"
def is_cython_directive(self, name):
return (name in Options.directive_types or
name in self.special_methods or
PyrexTypes.parse_basic_type(name))
def is_parallel_directive(self, full_name, pos):
"""
Checks to see if full_name (e.g. cython.parallel.prange) is a valid
parallel directive. If it is a star import it also updates the
parallel_directives.
"""
result = (full_name + ".").startswith("cython.parallel.")
if result:
directive = full_name.split('.')
if full_name == u"cython.parallel":
self.parallel_directives[u"parallel"] = u"cython.parallel"
elif full_name == u"cython.parallel.*":
for name in self.valid_parallel_directives:
self.parallel_directives[name] = u"cython.parallel.%s" % name
elif (len(directive) != 3 or
directive[-1] not in self.valid_parallel_directives):
error(pos, "No such directive: %s" % full_name)
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
return result
def visit_CImportStatNode(self, node):
if node.module_name == u"cython":
self.cython_module_names.add(node.as_name or u"cython")
elif node.module_name.startswith(u"cython."):
if node.module_name.startswith(u"cython.parallel."):
error(node.pos, node.module_name + " is not a module")
if node.module_name == u"cython.parallel":
if node.as_name and node.as_name != u"cython":
self.parallel_directives[node.as_name] = node.module_name
else:
self.cython_module_names.add(u"cython")
self.parallel_directives[
u"cython.parallel"] = node.module_name
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
elif node.as_name:
self.directive_names[node.as_name] = node.module_name[7:]
else:
self.cython_module_names.add(u"cython")
# if this cimport was a compiler directive, we don't
# want to leave the cimport node sitting in the tree
return None
return node
def visit_FromCImportStatNode(self, node):
if not node.relative_level and (
node.module_name == u"cython" or node.module_name.startswith(u"cython.")):
submodule = (node.module_name + u".")[7:]
newimp = []
for pos, name, as_name, kind in node.imported_names:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
# from cython cimport parallel, or
# from cython.parallel cimport parallel, prange, ...
self.parallel_directives[as_name or name] = qualified_name
elif self.is_cython_directive(full_name):
self.directive_names[as_name or name] = full_name
if kind is not None:
self.context.nonfatal_error(PostParseError(pos,
"Compiler directive imports must be plain imports"))
else:
newimp.append((pos, name, as_name, kind))
if not newimp:
return None
node.imported_names = newimp
return node
def visit_FromImportStatNode(self, node):
if (node.module.module_name.value == u"cython") or \
node.module.module_name.value.startswith(u"cython."):
submodule = (node.module.module_name.value + u".")[7:]
newimp = []
for name, name_node in node.items:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
self.parallel_directives[name_node.name] = qualified_name
elif self.is_cython_directive(full_name):
self.directive_names[name_node.name] = full_name
else:
newimp.append((name, name_node))
if not newimp:
return None
node.items = newimp
return node
def visit_SingleAssignmentNode(self, node):
if isinstance(node.rhs, ExprNodes.ImportNode):
module_name = node.rhs.module_name.value
is_parallel = (module_name + u".").startswith(u"cython.parallel.")
if module_name != u"cython" and not is_parallel:
return node
module_name = node.rhs.module_name.value
as_name = node.lhs.name
node = Nodes.CImportStatNode(node.pos,
module_name = module_name,
as_name = as_name)
node = self.visit_CImportStatNode(node)
else:
self.visitchildren(node)
return node
def visit_NameNode(self, node):
if node.name in self.cython_module_names:
node.is_cython_module = True
else:
directive = self.directive_names.get(node.name)
if directive is not None:
node.cython_attribute = directive
return node
def visit_NewExprNode(self, node):
self.visit(node.cppclass)
self.visitchildren(node)
return node
def try_to_parse_directives(self, node):
# If node is the contents of an directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
# Otherwise, returns None
if isinstance(node, ExprNodes.CallNode):
self.visit(node.function)
optname = node.function.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype:
args, kwds = node.explicit_args_kwds()
directives = []
key_value_pairs = []
if kwds is not None and directivetype is not dict:
for keyvalue in kwds.key_value_pairs:
key, value = keyvalue
sub_optname = "%s.%s" % (optname, key.value)
if Options.directive_types.get(sub_optname):
directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
else:
key_value_pairs.append(keyvalue)
if not key_value_pairs:
kwds = None
else:
kwds.key_value_pairs = key_value_pairs
if directives and not kwds and not args:
return directives
directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
return directives
elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
self.visit(node)
optname = node.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype is bool:
arg = ExprNodes.BoolNode(node.pos, value=True)
return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
elif directivetype is None:
return [(optname, None)]
else:
raise PostParseError(
node.pos, "The '%s' directive should be used as a function call." % optname)
return None
def try_to_parse_directive(self, optname, args, kwds, pos):
if optname == 'np_pythran' and not self.context.cpp:
raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
elif optname == 'exceptval':
# default: exceptval(None, check=True)
arg_error = len(args) > 1
check = True
if kwds and kwds.key_value_pairs:
kw = kwds.key_value_pairs[0]
if (len(kwds.key_value_pairs) == 1 and
kw.key.is_string_literal and kw.key.value == 'check' and
isinstance(kw.value, ExprNodes.BoolNode)):
check = kw.value.value
else:
arg_error = True
if arg_error:
raise PostParseError(
pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
return ('exceptval', (args[0] if args else None, check))
directivetype = Options.directive_types.get(optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.get_directive_defaults()[optname]
elif directivetype is bool:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
raise PostParseError(pos,
'The %s directive takes one compile-time boolean argument' % optname)
return (optname, args[0].value)
elif directivetype is int:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
raise PostParseError(pos,
'The %s directive takes one compile-time integer argument' % optname)
return (optname, int(args[0].value))
elif directivetype is str:
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, str(args[0].value))
elif directivetype is type:
if kwds is not None or len(args) != 1:
raise PostParseError(pos,
'The %s directive takes one type argument' % optname)
return (optname, args[0])
elif directivetype is dict:
if len(args) != 0:
raise PostParseError(pos,
'The %s directive takes no positional arguments' % optname)
return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
elif directivetype is list:
if kwds and len(kwds.key_value_pairs) != 0:
raise PostParseError(pos,
'The %s directive takes no keyword arguments' % optname)
return optname, [ str(arg.value) for arg in args ]
elif callable(directivetype):
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, directivetype(optname, str(args[0].value)))
else:
assert False
def visit_with_directives(self, body, directives):
olddirectives = self.directives
newdirectives = copy.copy(olddirectives)
newdirectives.update(directives)
self.directives = newdirectives
assert isinstance(body, Nodes.StatListNode), body
retbody = self.visit_Node(body)
directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody,
directives=newdirectives)
self.directives = olddirectives
return directive
# Handle decorators
def visit_FuncDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CVarDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return self.visit_Node(node)
for name, value in directives.items():
if name == 'locals':
node.directive_locals = value
elif name not in ('final', 'staticmethod'):
self.context.nonfatal_error(PostParseError(
node.pos,
"Cdef functions can only take cython.locals(), "
"staticmethod, or final decorators, got %s." % name))
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CClassDefNode(self, node):
directives = self._extract_directives(node, 'cclass')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CppClassNode(self, node):
directives = self._extract_directives(node, 'cppclass')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_PyClassDefNode(self, node):
directives = self._extract_directives(node, 'class')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def _extract_directives(self, node, scope_name):
if not node.decorators:
return {}
# Split the decorators into two lists -- real decorators and directives
directives = []
realdecs = []
both = []
# Decorators coming first take precedence.
for dec in node.decorators[::-1]:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
if self.check_directive_scope(node.pos, directive[0], scope_name):
name, value = directive
if self.directives.get(name, object()) != value:
directives.append(directive)
if directive[0] == 'staticmethod':
both.append(dec)
# Adapt scope type based on decorators that change it.
if directive[0] == 'cclass' and scope_name == 'class':
scope_name = 'cclass'
else:
realdecs.append(dec)
if realdecs and (scope_name == 'cclass' or
isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
node.decorators = realdecs[::-1] + both[::-1]
# merge or override repeated directives
optdict = {}
for directive in directives:
name, value = directive
if name in optdict:
old_value = optdict[name]
# keywords and arg lists can be merged, everything
# else overrides completely
if isinstance(old_value, dict):
old_value.update(value)
elif isinstance(old_value, list):
old_value.extend(value)
else:
optdict[name] = value
else:
optdict[name] = value
return optdict
# Handle with statements
def visit_WithStatNode(self, node):
directive_dict = {}
for directive in self.try_to_parse_directives(node.manager) or []:
if directive is not None:
if node.target is not None:
self.context.nonfatal_error(
PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
else:
name, value = directive
if name in ('nogil', 'gil'):
# special case: in pure mode, "with nogil" spells "with cython.nogil"
node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
return self.visit_Node(node)
if self.check_directive_scope(node.pos, name, 'with statement'):
directive_dict[name] = value
if directive_dict:
return self.visit_with_directives(node.body, directive_dict)
return self.visit_Node(node)
class ParallelRangeTransform(CythonTransform, SkipDeclarations):
"""
Transform cython.parallel stuff. The parallel_directives come from the
module node, set there by InterpretCompilerDirectives.
x = cython.parallel.threadavailable() -> ParallelThreadAvailableNode
with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
print cython.parallel.threadid() -> ParallelThreadIdNode
for i in cython.parallel.prange(...): -> ParallelRangeNode
...
"""
# a list of names, maps 'cython.parallel.prange' in the code to
# ['cython', 'parallel', 'prange']
parallel_directive = None
# Indicates whether a namenode in an expression is the cython module
namenode_is_cython_module = False
# Keep track of whether we are the context manager of a 'with' statement
in_context_manager_section = False
# One of 'prange' or 'with parallel'. This is used to disallow closely
# nested 'with parallel:' blocks
state = None
directive_to_node = {
u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
# u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
u"cython.parallel.prange": Nodes.ParallelRangeNode,
}
def node_is_parallel_directive(self, node):
return node.name in self.parallel_directives or node.is_cython_module
def get_directive_class_node(self, node):
"""
Figure out which parallel directive was used and return the associated
Node class.
E.g. for a cython.parallel.prange() call we return ParallelRangeNode
"""
if self.namenode_is_cython_module:
directive = '.'.join(self.parallel_directive)
else:
directive = self.parallel_directives[self.parallel_directive[0]]
directive = '%s.%s' % (directive,
'.'.join(self.parallel_directive[1:]))
directive = directive.rstrip('.')
cls = self.directive_to_node.get(directive)
if cls is None and not (self.namenode_is_cython_module and
self.parallel_directive[0] != 'parallel'):
error(node.pos, "Invalid directive: %s" % directive)
self.namenode_is_cython_module = False
self.parallel_directive = None
return cls
def visit_ModuleNode(self, node):
"""
If any parallel directives were imported, copy them over and visit
the AST
"""
if node.parallel_directives:
self.parallel_directives = node.parallel_directives
return self.visit_Node(node)
# No parallel directives were imported, so they can't be used :)
return node
def visit_NameNode(self, node):
if self.node_is_parallel_directive(node):
self.parallel_directive = [node.name]
self.namenode_is_cython_module = node.is_cython_module
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
if self.parallel_directive:
self.parallel_directive.append(node.attribute)
return node
def visit_CallNode(self, node):
self.visit(node.function)
if not self.parallel_directive:
return node
# We are a parallel directive, replace this node with the
# corresponding ParallelSomethingSomething node
if isinstance(node, ExprNodes.GeneralCallNode):
args = node.positional_args.args
kwargs = node.keyword_args
else:
args = node.args
kwargs = {}
parallel_directive_class = self.get_directive_class_node(node)
if parallel_directive_class:
# Note: in case of a parallel() the body is set by
# visit_WithStatNode
node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
return node
def visit_WithStatNode(self, node):
"Rewrite with cython.parallel.parallel() blocks"
newnode = self.visit(node.manager)
if isinstance(newnode, Nodes.ParallelWithBlockNode):
if self.state == 'parallel with':
error(node.manager.pos,
"Nested parallel with blocks are disallowed")
self.state = 'parallel with'
body = self.visit(node.body)
self.state = None
newnode.body = body
return newnode
elif self.parallel_directive:
parallel_directive_class = self.get_directive_class_node(node)
if not parallel_directive_class:
# There was an error, stop here and now
return None
if parallel_directive_class is Nodes.ParallelWithBlockNode:
error(node.pos, "The parallel directive must be called")
return None
node.body = self.visit(node.body)
return node
def visit_ForInStatNode(self, node):
"Rewrite 'for i in cython.parallel.prange(...):'"
self.visit(node.iterator)
self.visit(node.target)
in_prange = isinstance(node.iterator.sequence,
Nodes.ParallelRangeNode)
previous_state = self.state
if in_prange:
# This will replace the entire ForInStatNode, so copy the
# attributes
parallel_range_node = node.iterator.sequence
parallel_range_node.target = node.target
parallel_range_node.body = node.body
parallel_range_node.else_clause = node.else_clause
node = parallel_range_node
if not isinstance(node.target, ExprNodes.NameNode):
error(node.target.pos,
"Can only iterate over an iteration variable")
self.state = 'prange'
self.visit(node.body)
self.state = previous_state
self.visit(node.else_clause)
return node
def visit(self, node):
"Visit a node that may be None"
if node is not None:
return super(ParallelRangeTransform, self).visit(node)
class WithTransform(CythonTransform, SkipDeclarations):
def visit_WithStatNode(self, node):
self.visitchildren(node, 'body')
pos = node.pos
is_async = node.is_async
body, target, manager = node.body, node.target, node.manager
node.enter_call = ExprNodes.SimpleCallNode(
pos, function=ExprNodes.AttributeNode(
pos, obj=ExprNodes.CloneNode(manager),
attribute=EncodedString('__aenter__' if is_async else '__enter__'),
is_special_lookup=True),
args=[],
is_temp=True)
if is_async:
node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
if target is not None:
body = Nodes.StatListNode(
pos, stats=[
Nodes.WithTargetAssignmentStatNode(
pos, lhs=target, with_node=node),
body])
excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
ExprNodes.ExcValueNode(pos) for _ in range(3)])
except_clause = Nodes.ExceptClauseNode(
pos, body=Nodes.IfStatNode(
pos, if_clauses=[
Nodes.IfClauseNode(
pos, condition=ExprNodes.NotNode(
pos, operand=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=False,
args=excinfo_target,
await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
body=Nodes.ReraiseStatNode(pos),
),
],
else_clause=None),
pattern=None,
target=None,
excinfo_target=excinfo_target,
)
node.body = Nodes.TryFinallyStatNode(
pos, body=Nodes.TryExceptStatNode(
pos, body=body,
except_clauses=[except_clause],
else_clause=None,
),
finally_clause=Nodes.ExprStatNode(
pos, expr=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=True,
args=ExprNodes.TupleNode(
pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
handle_error_case=False,
)
return node
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
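# Illustrative sketch (not from the original source) of the rewrite performed
# by WithTransform above, in plain Python terms: a statement
#     with mgr as x:
#         body
# is expanded to roughly
#     x = mgr.__enter__()
#     try:
#         body
#     except BaseException:
#         if not mgr.__exit__(*sys.exc_info()):
#             raise
#     else:
#         mgr.__exit__(None, None, None)
# with the async variants using __aenter__/__aexit__ and awaiting the calls.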
class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
"""
Transforms method decorators in cdef classes into nested calls or properties.
Python-style decorator properties are transformed into a PropertyNode
with up to the three getter, setter and deleter DefNodes.
The functional style isn't supported yet.
"""
_properties = None
_map_property_attribute = {
'getter': '__get__',
'setter': '__set__',
'deleter': '__del__',
}.get
def visit_CClassDefNode(self, node):
if self._properties is None:
self._properties = []
self._properties.append({})
super(DecoratorTransform, self).visit_CClassDefNode(node)
self._properties.pop()
return node
def visit_PropertyNode(self, node):
# Low-level warning for other code until we can convert all our uses over.
level = 2 if isinstance(node.pos[0], str) else 0
warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level)
return node
def visit_DefNode(self, node):
scope_type = self.scope_type
node = self.visit_FuncDefNode(node)
if scope_type != 'cclass' or not node.decorators:
return node
# transform @property decorators
properties = self._properties[-1]
for decorator_node in node.decorators[::-1]:
decorator = decorator_node.decorator
if decorator.is_name and decorator.name == 'property':
if len(node.decorators) > 1:
return self._reject_decorated_property(node, decorator_node)
name = node.name
node.name = EncodedString('__get__')
node.decorators.remove(decorator_node)
stat_list = [node]
if name in properties:
prop = properties[name]
prop.pos = node.pos
prop.doc = node.doc
prop.body.stats = stat_list
return []
prop = Nodes.PropertyNode(node.pos, name=name)
prop.doc = node.doc
prop.body = Nodes.StatListNode(node.pos, stats=stat_list)
properties[name] = prop
return [prop]
elif decorator.is_attribute and decorator.obj.name in properties:
handler_name = self._map_property_attribute(decorator.attribute)
if handler_name:
if decorator.obj.name != node.name:
# CPython does not generate an error or warning here, but it does not
# do anything useful either.
error(decorator_node.pos,
"Mismatching property names, expected '%s', got '%s'" % (
decorator.obj.name, node.name))
elif len(node.decorators) > 1:
return self._reject_decorated_property(node, decorator_node)
else:
return self._add_to_property(properties, node, handler_name, decorator_node)
# we clear node.decorators, so we need to set the
# is_staticmethod/is_classmethod attributes now
for decorator in node.decorators:
func = decorator.decorator
if func.is_name:
node.is_classmethod |= func.name == 'classmethod'
node.is_staticmethod |= func.name == 'staticmethod'
# transform normal decorators
decs = node.decorators
node.decorators = None
return self.chain_decorators(node, decs, node.name)
@staticmethod
def _reject_decorated_property(node, decorator_node):
# restrict transformation to outermost decorator as wrapped properties will probably not work
for deco in node.decorators:
if deco != decorator_node:
error(deco.pos, "Property methods with additional decorators are not supported")
return node
@staticmethod
def _add_to_property(properties, node, name, decorator):
prop = properties[node.name]
node.name = name
node.decorators.remove(decorator)
stats = prop.body.stats
for i, stat in enumerate(stats):
if stat.name == name:
stats[i] = node
break
else:
stats.append(node)
return []
@staticmethod
def chain_decorators(node, decorators, name):
"""
Decorators are applied directly in DefNode and PyClassDefNode to avoid
reassignments to the function/class name - except for cdef class methods.
For those, the reassignment is required as methods are originally
defined in the PyMethodDef struct.
The IndirectionNode allows DefNode to override the decorator.
"""
decorator_result = ExprNodes.NameNode(node.pos, name=name)
for decorator in decorators[::-1]:
decorator_result = ExprNodes.SimpleCallNode(
decorator.pos,
function=decorator.decorator,
args=[decorator_result])
name_node = ExprNodes.NameNode(node.pos, name=name)
reassignment = Nodes.SingleAssignmentNode(
node.pos,
lhs=name_node,
rhs=decorator_result)
reassignment = Nodes.IndirectionNode([reassignment])
node.decorator_indirection = reassignment
return [node, reassignment]
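# Illustrative sketch (not from the original source) of the chaining built by
# chain_decorators() above: for a method
#     @deco_a
#     @deco_b
#     def meth(self): ...
# the generated reassignment is equivalent to "meth = deco_a(deco_b(meth))",
# applied innermost-first, with the IndirectionNode letting DefNode skip the
# reassignment when it handles the decorators directly.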
class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
"""
Only part of the CythonUtilityCode pipeline. Must be run before
DecoratorTransform in case this is a decorator for a cdef class.
It filters out @cname('my_cname') decorators and rewrites them to
CnameDecoratorNodes.
"""
def handle_function(self, node):
if not getattr(node, 'decorators', None):
return self.visit_Node(node)
for i, decorator in enumerate(node.decorators):
decorator = decorator.decorator
if (isinstance(decorator, ExprNodes.CallNode) and
decorator.function.is_name and
decorator.function.name == 'cname'):
args, kwargs = decorator.explicit_args_kwds()
if kwargs:
raise AssertionError(
"cname decorator does not take keyword arguments")
if len(args) != 1:
raise AssertionError(
"cname decorator takes exactly one argument")
if not (args[0].is_literal and
args[0].type == Builtin.str_type):
raise AssertionError(
"argument to cname decorator must be a string literal")
cname = args[0].compile_time_value(None)
del node.decorators[i]
node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
cname=cname)
break
return self.visit_Node(node)
visit_FuncDefNode = handle_function
visit_CClassDefNode = handle_function
visit_CEnumDefNode = handle_function
visit_CStructOrUnionDefNode = handle_function
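# Illustrative sketch (not from the original source): inside Cython utility
# code, a declaration such as
#     @cname('__pyx_my_helper')
#     cdef int my_helper(int x): ...
# is rewritten by the transform above into a CnameDecoratorNode that gives the
# function the C name '__pyx_my_helper'.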
class ForwardDeclareTypes(CythonTransform):
def visit_CompilerDirectivesNode(self, node):
env = self.module_scope
old = env.directives
env.directives = node.directives
self.visitchildren(node)
env.directives = old
return node
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.module_scope.directives = node.directives
self.visitchildren(node)
return node
def visit_CDefExternNode(self, node):
old_cinclude_flag = self.module_scope.in_cinclude
self.module_scope.in_cinclude = 1
self.visitchildren(node)
self.module_scope.in_cinclude = old_cinclude_flag
return node
def visit_CEnumDefNode(self, node):
node.declare(self.module_scope)
return node
def visit_CStructOrUnionDefNode(self, node):
if node.name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
def visit_CClassDefNode(self, node):
if node.class_name not in self.module_scope.entries:
node.declare(self.module_scope)
# Expand fused methods of .pxd declared types to construct the final vtable order.
type = self.module_scope.entries[node.class_name].type
if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope:
scope = type.scope
for entry in scope.cfunc_entries:
if entry.type and entry.type.is_fused:
entry.type.get_all_specialized_function_types()
return node
class AnalyseDeclarationsTransform(EnvTransform):
basic_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_pyobject_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
def __del__(self):
ATTR = None
""", level='c_class', pipeline=[NormalizeTree(None)])
basic_property_ro = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
""", level='c_class', pipeline=[NormalizeTree(None)])
struct_or_union_wrapper = TreeFragment(u"""
cdef class NAME:
cdef TYPE value
def __init__(self, MEMBER=None):
cdef int count
count = 0
INIT_ASSIGNMENTS
if IS_UNION and count > 1:
raise ValueError, "At most one union member should be specified."
def __str__(self):
return STR_FORMAT % MEMBER_TUPLE
def __repr__(self):
return REPR_FORMAT % MEMBER_TUPLE
""", pipeline=[NormalizeTree(None)])
init_assignment = TreeFragment(u"""
if VALUE is not None:
ATTR = VALUE
count += 1
""", pipeline=[NormalizeTree(None)])
fused_function = None
in_lambda = 0
def __call__(self, root):
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
self.fused_error_funcs = set()
super_class = super(AnalyseDeclarationsTransform, self)
self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
return super_class.__call__(root)
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
def visit_ModuleNode(self, node):
# Pickling support requires injecting module-level nodes.
self.extra_module_declarations = []
self.seen_vars_stack.append(set())
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.seen_vars_stack.pop()
node.body.stats.extend(self.extra_module_declarations)
return node
def visit_LambdaNode(self, node):
self.in_lambda += 1
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.in_lambda -= 1
return node
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented and node.body:
stats = []
for entry in node.scope.var_entries:
if entry.needs_property:
property = self.create_Property(entry)
property.analyse_declarations(node.scope)
self.visit(property)
stats.append(property)
if stats:
node.body.stats += stats
if (node.visibility != 'extern'
and not node.scope.lookup('__reduce__')
and not node.scope.lookup('__reduce_ex__')):
self._inject_pickle_methods(node)
return node
def _inject_pickle_methods(self, node):
env = self.current_env()
if node.scope.directives['auto_pickle'] is False: # None means attempt it.
# Old behavior of not doing anything.
return
auto_pickle_forced = node.scope.directives['auto_pickle'] is True
all_members = []
cls = node.entry.type
cinit = None
inherited_reduce = None
while cls is not None:
all_members.extend(e for e in cls.scope.var_entries if e.name not in ('__weakref__', '__dict__'))
cinit = cinit or cls.scope.lookup('__cinit__')
inherited_reduce = inherited_reduce or cls.scope.lookup('__reduce__') or cls.scope.lookup('__reduce_ex__')
cls = cls.base_type
all_members.sort(key=lambda e: e.name)
if inherited_reduce:
# This is not failsafe, as we may not know whether a cimported class defines a __reduce__.
# This is why we define __reduce_cython__ and only replace __reduce__
# (via ExtensionTypes.SetupReduce utility code) at runtime on class creation.
return
non_py = [
e for e in all_members
if not e.type.is_pyobject and (not e.type.can_coerce_to_pyobject(env)
or not e.type.can_coerce_from_pyobject(env))
]
structs = [e for e in all_members if e.type.is_struct_or_union]
if cinit or non_py or (structs and not auto_pickle_forced):
if cinit:
# TODO(robertwb): We could allow this if __cinit__ has no required arguments.
msg = 'no default __reduce__ due to non-trivial __cinit__'
elif non_py:
msg = "%s cannot be converted to a Python object for pickling" % ','.join("self.%s" % e.name for e in non_py)
else:
# Extern structs may be only partially defined.
# TODO(robertwb): Limit the restriction to extern
# (and recursively extern-containing) structs.
msg = ("Pickling of struct members such as %s must be explicitly requested "
"with @auto_pickle(True)" % ','.join("self.%s" % e.name for e in structs))
if auto_pickle_forced:
error(node.pos, msg)
pickle_func = TreeFragment(u"""
def __reduce_cython__(self):
raise TypeError("%(msg)s")
def __setstate_cython__(self, __pyx_state):
raise TypeError("%(msg)s")
""" % {'msg': msg},
level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
pickle_func.analyse_declarations(node.scope)
self.visit(pickle_func)
node.body.stats.append(pickle_func)
else:
for e in all_members:
if not e.type.is_pyobject:
e.type.create_to_py_utility_code(env)
e.type.create_from_py_utility_code(env)
all_members_names = sorted([e.name for e in all_members])
checksum = '0x%s' % hashlib.md5(' '.join(all_members_names).encode('utf-8')).hexdigest()[:7]
unpickle_func_name = '__pyx_unpickle_%s' % node.class_name
# TODO(robertwb): Move the state into the third argument
# so it can be pickled *after* self is memoized.
unpickle_func = TreeFragment(u"""
def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state):
if __pyx_checksum != %(checksum)s:
from pickle import PickleError as __pyx_PickleError
raise __pyx_PickleError("Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum)
__pyx_result = %(class_name)s.__new__(__pyx_type)
if __pyx_state is not None:
%(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state)
return __pyx_result
cdef %(unpickle_func_name)s__set_state(%(class_name)s __pyx_result, tuple __pyx_state):
%(assignments)s
if len(__pyx_state) > %(num_members)d and hasattr(__pyx_result, '__dict__'):
__pyx_result.__dict__.update(__pyx_state[%(num_members)d])
""" % {
'unpickle_func_name': unpickle_func_name,
'checksum': checksum,
'members': ', '.join(all_members_names),
'class_name': node.class_name,
'assignments': '; '.join(
'__pyx_result.%s = __pyx_state[%s]' % (v, ix)
for ix, v in enumerate(all_members_names)),
'num_members': len(all_members_names),
}, level='module', pipeline=[NormalizeTree(None)]).substitute({})
unpickle_func.analyse_declarations(node.entry.scope)
self.visit(unpickle_func)
self.extra_module_declarations.append(unpickle_func)
pickle_func = TreeFragment(u"""
def __reduce_cython__(self):
cdef bint use_setstate
state = (%(members)s)
_dict = getattr(self, '__dict__', None)
if _dict is not None:
state += (_dict,)
use_setstate = True
else:
use_setstate = %(any_notnone_members)s
if use_setstate:
return %(unpickle_func_name)s, (type(self), %(checksum)s, None), state
else:
return %(unpickle_func_name)s, (type(self), %(checksum)s, state)
def __setstate_cython__(self, __pyx_state):
%(unpickle_func_name)s__set_state(self, __pyx_state)
""" % {
'unpickle_func_name': unpickle_func_name,
'checksum': checksum,
'members': ', '.join('self.%s' % v for v in all_members_names) + (',' if len(all_members_names) == 1 else ''),
# Even better, we could check PyType_IS_GC.
'any_notnone_members' : ' or '.join(['self.%s is not None' % e.name for e in all_members if e.type.is_pyobject] or ['False']),
},
level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
pickle_func.analyse_declarations(node.scope)
self.visit(pickle_func)
node.body.stats.append(pickle_func)
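# For illustration: given a simple extension type such as
#
#     cdef class Point:
#         cdef public double x, y
#
# the code above synthesizes a module-level __pyx_unpickle_Point() helper
# together with __reduce_cython__()/__setstate_cython__() methods, so that
# pickle can round-trip instances without any user-written support code.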
def _handle_fused_def_decorators(self, old_decorators, env, node):
"""
Create function calls to the decorators and reassignments to
the function.
"""
# Delete staticmethod and classmethod decorators; they are
# handled directly by the fused function object.
decorators = []
for decorator in old_decorators:
func = decorator.decorator
if (not func.is_name or
func.name not in ('staticmethod', 'classmethod') or
env.lookup_here(func.name)):
# not a static or classmethod
decorators.append(decorator)
if decorators:
transform = DecoratorTransform(self.context)
def_node = node.node
_, reassignments = transform.chain_decorators(
def_node, decorators, def_node.name)
reassignments.analyse_declarations(env)
node = [node, reassignments]
return node
def _handle_def(self, decorators, env, node):
"Handle def or cpdef fused functions"
# Create PyCFunction nodes for each specialization
node.stats.insert(0, node.py_func)
node.py_func = self.visit(node.py_func)
node.update_fused_defnode_entry(env)
pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True)
pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
node.resulting_fused_function = pycfunc
# Create assignment node for our def function
node.fused_func_assignment = self._create_assignment(
node.py_func, ExprNodes.CloneNode(pycfunc), env)
if decorators:
node = self._handle_fused_def_decorators(decorators, env, node)
return node
def _create_fused_function(self, env, node):
"Create a fused function for a DefNode with fused arguments"
from . import FusedNode
if self.fused_function or self.in_lambda:
if self.fused_function not in self.fused_error_funcs:
if self.in_lambda:
error(node.pos, "Fused lambdas not allowed")
else:
error(node.pos, "Cannot nest fused functions")
self.fused_error_funcs.add(self.fused_function)
node.body = Nodes.PassStatNode(node.pos)
for arg in node.args:
if arg.type.is_fused:
arg.type = arg.type.get_fused_types()[0]
return node
decorators = getattr(node, 'decorators', None)
node = FusedNode.FusedCFuncDefNode(node, env)
self.fused_function = node
self.visitchildren(node)
self.fused_function = None
if node.py_func:
node = self._handle_def(decorators, env, node)
return node
def _handle_nogil_cleanup(self, lenv, node):
"Handle cleanup for 'with gil' blocks in nogil functions."
if lenv.nogil and lenv.has_with_gil_block:
# Acquire the GIL for cleanup in 'nogil' functions, by wrapping
# the entire function body in try/finally.
# The corresponding release will be taken care of by
# Nodes.FuncDefNode.generate_function_definitions()
node.body = Nodes.NogilTryFinallyStatNode(
node.body.pos,
body=node.body,
finally_clause=Nodes.EnsureGILNode(node.body.pos),
finally_except_clause=Nodes.EnsureGILNode(node.body.pos))
def _handle_fused(self, node):
if node.is_generator and node.has_fused_arguments:
node.has_fused_arguments = False
error(node.pos, "Fused generators not supported")
node.gbody = Nodes.StatListNode(node.pos,
stats=[],
body=Nodes.PassStatNode(node.pos))
return node.has_fused_arguments
def visit_FuncDefNode(self, node):
"""
Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals().
Then, if we are a function with fused arguments, replace the function
(after it has declared itself in the symbol table!) with a
FusedCFuncDefNode, and analyse its children (which are in turn normal
functions). If we're a normal function, just analyse the body of the
function.
"""
env = self.current_env()
self.seen_vars_stack.append(set())
lenv = node.local_scope
node.declare_arguments(lenv)
# @cython.locals(...)
for var, type_node in node.directive_locals.items():
if not lenv.lookup_here(var): # don't redeclare args
type = type_node.analyse_as_type(lenv)
if type:
lenv.declare_var(var, type, type_node.pos)
else:
error(type_node.pos, "Not a type")
if self._handle_fused(node):
node = self._create_fused_function(env, node)
else:
node.body.analyse_declarations(lenv)
self._handle_nogil_cleanup(lenv, node)
self._super_visit_FuncDefNode(node)
self.seen_vars_stack.pop()
return node
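# For illustration: a def function with a fused argument type, e.g.
#
#     ctypedef fused number:
#         int
#         double
#
#     def twice(number x):
#         return 2 * x
#
# is replaced here by a FusedCFuncDefNode wrapping one specialization per
# member of the fused type, while a plain function only has its body analysed.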
def visit_DefNode(self, node):
node = self.visit_FuncDefNode(node)
env = self.current_env()
if isinstance(node, Nodes.DefNode) and node.is_wrapper:
env = env.parent_scope
if (not isinstance(node, Nodes.DefNode) or
node.fused_py_func or node.is_generator_body or
not node.needs_assignment_synthesis(env)):
return node
return [node, self._synthesize_assignment(node, env)]
def visit_GeneratorBodyDefNode(self, node):
return self.visit_FuncDefNode(node)
def _synthesize_assignment(self, node, env):
# Synthesize assignment node and put it right after defnode
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if genv.is_closure_scope:
rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
node.pos, def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
code_object=ExprNodes.CodeObjectNode(node))
else:
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
node.code_object = rhs.code_object
if node.is_generator:
node.gbody.code_object = node.code_object
if env.is_py_class_scope:
rhs.binding = True
node.is_cyfunction = rhs.binding
return self._create_assignment(node, rhs, env)
def _create_assignment(self, def_node, rhs, env):
if def_node.decorators:
for decorator in def_node.decorators[::-1]:
rhs = ExprNodes.SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [rhs])
def_node.decorators = None
assmt = Nodes.SingleAssignmentNode(
def_node.pos,
lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
rhs=rhs)
assmt.analyse_declarations(env)
return assmt
def visit_ScopedExprNode(self, node):
env = self.current_env()
node.analyse_declarations(env)
# the node may or may not have a local scope
if node.has_local_scope:
self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
self.enter_scope(node, node.expr_scope)
node.analyse_scoped_declarations(node.expr_scope)
self.visitchildren(node)
self.exit_scope()
self.seen_vars_stack.pop()
else:
node.analyse_scoped_declarations(env)
self.visitchildren(node)
return node
def visit_TempResultFromStatNode(self, node):
self.visitchildren(node)
node.analyse_declarations(self.current_env())
return node
def visit_CppClassNode(self, node):
if node.visibility == 'extern':
return None
else:
return self.visit_ClassDefNode(node)
def visit_CStructOrUnionDefNode(self, node):
# Create a wrapper node if needed.
# We want to use the struct type information (so it can't happen
# before this phase) but also create new objects to be declared
# (so it can't happen later).
# Note that we don't return the original node, as it is
# never used after this phase.
if True: # private (default)
return None
self_value = ExprNodes.AttributeNode(
pos = node.pos,
obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
attribute = EncodedString(u"value"))
var_entries = node.entry.type.scope.var_entries
attributes = []
for entry in var_entries:
attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
obj = self_value,
attribute = entry.name))
# __init__ assignments
init_assignments = []
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
init_assignments.append(self.init_assignment.substitute({
u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
u"ATTR": attr,
}, pos = entry.pos))
# create the class
str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
wrapper_class = self.struct_or_union_wrapper.substitute({
u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
}, pos = node.pos).stats[0]
wrapper_class.class_name = node.name
wrapper_class.shadow = True
class_body = wrapper_class.body.stats
# fix value type
assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
class_body[0].base_type.name = node.name
# fix __init__ arguments
init_method = class_body[1]
assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
arg_template = init_method.args[1]
if not node.entry.type.is_struct:
arg_template.kw_only = True
del init_method.args[1]
for entry, attr in zip(var_entries, attributes):
arg = copy.deepcopy(arg_template)
arg.declarator.name = entry.name
init_method.args.append(arg)
# setters/getters
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
property = template.substitute({
u"ATTR": attr,
}, pos = entry.pos).stats[0]
property.name = entry.name
wrapper_class.body.stats.append(property)
wrapper_class.analyse_declarations(self.current_env())
return self.visit_CClassDefNode(wrapper_class)
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
# on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return node
def visit_CTypeDefNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return None
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
else:
return None
def visit_CNameDeclaratorNode(self, node):
if node.name in self.seen_vars_stack[-1]:
entry = self.current_env().lookup(node.name)
if (entry is None or entry.visibility != 'extern'
and not entry.scope.is_c_class_scope):
warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
self.visitchildren(node)
return node
def visit_CVarDefNode(self, node):
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
def visit_CnameDecoratorNode(self, node):
child_node = self.visit(node.node)
if not child_node:
return None
if type(child_node) is list: # Assignment synthesized
node.child_node = child_node[0]
return [node] + child_node[1:]
node.node = child_node
return node
def create_Property(self, entry):
if entry.visibility == 'public':
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
elif entry.visibility == 'readonly':
template = self.basic_property_ro
property = template.substitute({
u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
attribute=entry.name),
}, pos=entry.pos).stats[0]
property.name = entry.name
property.doc = entry.doc
return property
class CalculateQualifiedNamesTransform(EnvTransform):
"""
Calculate and store the '__qualname__' and the global
module name on some nodes.
"""
def visit_ModuleNode(self, node):
self.module_name = self.global_scope().qualified_name
self.qualified_name = []
_super = super(CalculateQualifiedNamesTransform, self)
self._super_visit_FuncDefNode = _super.visit_FuncDefNode
self._super_visit_ClassDefNode = _super.visit_ClassDefNode
self.visitchildren(node)
return node
def _set_qualname(self, node, name=None):
if name:
qualname = self.qualified_name[:]
qualname.append(name)
else:
qualname = self.qualified_name
node.qualname = EncodedString('.'.join(qualname))
node.module_name = self.module_name
def _append_entry(self, entry):
if entry.is_pyglobal and not entry.is_pyclass_attr:
self.qualified_name = [entry.name]
else:
self.qualified_name.append(entry.name)
def visit_ClassNode(self, node):
self._set_qualname(node, node.name)
self.visitchildren(node)
return node
def visit_PyClassNamespaceNode(self, node):
# class name was already added by parent node
self._set_qualname(node)
self.visitchildren(node)
return node
def visit_PyCFunctionNode(self, node):
orig_qualified_name = self.qualified_name[:]
if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '<locals>':
self.qualified_name.pop()
self._set_qualname(node)
else:
self._set_qualname(node, node.def_node.name)
self.visitchildren(node)
self.qualified_name = orig_qualified_name
return node
def visit_DefNode(self, node):
if node.is_wrapper and self.qualified_name:
assert self.qualified_name[-1] == '<locals>', self.qualified_name
orig_qualified_name = self.qualified_name[:]
self.qualified_name.pop()
self._set_qualname(node)
self._super_visit_FuncDefNode(node)
self.qualified_name = orig_qualified_name
else:
self._set_qualname(node, node.name)
self.visit_FuncDefNode(node)
return node
def visit_FuncDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
if getattr(node, 'name', None) == '<lambda>':
self.qualified_name.append('<lambda>')
else:
self._append_entry(node.entry)
self.qualified_name.append('<locals>')
self._super_visit_FuncDefNode(node)
self.qualified_name = orig_qualified_name
return node
def visit_ClassDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
entry = (getattr(node, 'entry', None) or # PyClass
self.current_env().lookup_here(node.name)) # CClass
self._append_entry(entry)
self._super_visit_ClassDefNode(node)
self.qualified_name = orig_qualified_name
return node
class AnalyseExpressionsTransform(CythonTransform):
def visit_ModuleNode(self, node):
node.scope.infer_types()
node.body = node.body.analyse_expressions(node.scope)
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
node.local_scope.infer_types()
node.body = node.body.analyse_expressions(node.local_scope)
self.visitchildren(node)
return node
def visit_ScopedExprNode(self, node):
if node.has_local_scope:
node.expr_scope.infer_types()
node = node.analyse_scoped_expressions(node.expr_scope)
self.visitchildren(node)
return node
def visit_IndexNode(self, node):
"""
Replace index nodes used to specialize cdef functions with fused
argument types with the Attribute- or NameNode referring to the
function. We then need to copy over the specialization properties to
the attribute or name node.
Because the indexing might be a Python indexing operation on a fused
function, or (usually) a Cython indexing operation, we need to
re-analyse the types.
"""
self.visit_Node(node)
if node.is_fused_index and not node.type.is_error:
node = node.base
return node
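# For illustration: indexing a fused function to select a specialization, e.g.
#
#     cfunc[double](x)
#
# is collapsed here into a direct reference to the chosen specialization
# instead of being left as an indexing operation on the fused function.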
class FindInvalidUseOfFusedTypes(CythonTransform):
def visit_FuncDefNode(self, node):
# Errors related to use in functions with fused args will already
# have been detected
if not node.has_fused_arguments:
if not node.is_generator_body and node.return_type.is_fused:
error(node.pos, "Return type is not specified as argument type")
else:
self.visitchildren(node)
return node
def visit_ExprNode(self, node):
if node.type and node.type.is_fused:
error(node.pos, "Invalid use of fused types, type cannot be specialized")
else:
self.visitchildren(node)
return node
class ExpandInplaceOperators(EnvTransform):
def visit_InPlaceAssignmentNode(self, node):
lhs = node.lhs
rhs = node.rhs
if lhs.type.is_cpp_class:
# No getting around this exact operator here.
return node
if isinstance(lhs, ExprNodes.BufferIndexNode):
# There is code to handle this case in InPlaceAssignmentNode
return node
env = self.current_env()
def side_effect_free_reference(node, setting=False):
if node.is_name:
return node, []
elif node.type.is_pyobject and not setting:
node = LetRefNode(node)
return node, [node]
elif node.is_subscript:
base, temps = side_effect_free_reference(node.base)
index = LetRefNode(node.index)
return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
elif node.is_attribute:
obj, temps = side_effect_free_reference(node.obj)
return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
elif isinstance(node, ExprNodes.BufferIndexNode):
raise ValueError("Don't allow things like attributes of buffer indexing operations")
else:
node = LetRefNode(node)
return node, [node]
try:
lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
except ValueError:
return node
dup = lhs.__class__(**lhs.__dict__)
binop = ExprNodes.binop_node(node.pos,
operator = node.operator,
operand1 = dup,
operand2 = rhs,
inplace=True)
# Manually analyse types for new node.
lhs.analyse_target_types(env)
dup.analyse_types(env)
binop.analyse_operation(env)
node = Nodes.SingleAssignmentNode(
node.pos,
lhs = lhs,
rhs=binop.coerce_to(lhs.type, env))
# Use LetRefNode to avoid side effects.
let_ref_nodes.reverse()
for t in let_ref_nodes:
node = LetNode(t, node)
return node
def visit_ExprNode(self, node):
# In-place assignments can't happen within an expression.
return node
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
"""
Adjust function and class definitions by the decorator directives:
@cython.cfunc
@cython.cclass
@cython.ccall
@cython.inline
"""
def visit_ModuleNode(self, node):
self.directives = node.directives
self.in_py_class = False
self.visitchildren(node)
return node
def visit_CompilerDirectivesNode(self, node):
old_directives = self.directives
self.directives = node.directives
self.visitchildren(node)
self.directives = old_directives
return node
def visit_DefNode(self, node):
modifiers = []
if 'inline' in self.directives:
modifiers.append('inline')
except_val = self.directives.get('exceptval')
return_type_node = self.directives.get('returns')
if return_type_node is None and self.directives['annotation_typing']:
return_type_node = node.return_type_annotation
# for Python annotations, prefer safe exception handling by default
if return_type_node is not None and except_val is None:
except_val = (None, True) # except *
elif except_val is None:
# backward compatible default: no exception check
except_val = (None, False)
if 'ccall' in self.directives:
node = node.as_cfunction(
overridable=True, modifiers=modifiers,
returns=return_type_node, except_val=except_val)
return self.visit(node)
if 'cfunc' in self.directives:
if self.in_py_class:
error(node.pos, "cfunc directive is not allowed here")
else:
node = node.as_cfunction(
overridable=False, modifiers=modifiers,
returns=return_type_node, except_val=except_val)
return self.visit(node)
if 'inline' in modifiers:
error(node.pos, "Python functions cannot be declared 'inline'")
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
if 'cclass' in self.directives:
node = node.as_cclass()
return self.visit(node)
else:
old_in_pyclass = self.in_py_class
self.in_py_class = True
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
def visit_CClassDefNode(self, node):
old_in_pyclass = self.in_py_class
self.in_py_class = False
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
class AlignFunctionDefinitions(CythonTransform):
"""
This class takes the signatures from a .pxd file and applies them to
the def methods in a .py file.
"""
def visit_ModuleNode(self, node):
self.scope = node.scope
self.directives = node.directives
self.imported_names = set() # hack, see visit_FromImportStatNode()
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
if pxd_def.is_cclass:
return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
return node
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
if pxd_def:
if not pxd_def.defined_in_pxd:
return node
outer_scope = self.scope
self.scope = pxd_def.type.scope
self.visitchildren(node)
if pxd_def:
self.scope = outer_scope
return node
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
if not pxd_def.is_cfunction:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
node = node.as_cfunction(pxd_def)
elif (self.scope.is_module_scope and self.directives['auto_cpdef']
and not node.name in self.imported_names
and node.is_cdef_func_compatible()):
# FIXME: cpdef-ing should be done in analyse_declarations()
node = node.as_cfunction(scope=self.scope)
# Enable this when nested cdef functions are allowed.
# self.visitchildren(node)
return node
def visit_FromImportStatNode(self, node):
# hack to prevent conditional import fallback functions from
# being cpdef-ed (global Python variables currently conflict
# with imports)
if self.scope.is_module_scope:
for name, _ in node.items:
self.imported_names.add(name)
return node
def visit_ExprNode(self, node):
# ignore lambdas and everything else that appears in expressions
return node
class RemoveUnreachableCode(CythonTransform):
def visit_StatListNode(self, node):
if not self.current_directives['remove_unreachable']:
return node
self.visitchildren(node)
for idx, stat in enumerate(node.stats):
idx += 1
if stat.is_terminator:
if idx < len(node.stats):
if self.current_directives['warn.unreachable']:
warning(node.stats[idx].pos, "Unreachable code", 2)
node.stats = node.stats[:idx]
node.is_terminator = True
break
return node
def visit_IfClauseNode(self, node):
self.visitchildren(node)
if node.body.is_terminator:
node.is_terminator = True
return node
def visit_IfStatNode(self, node):
self.visitchildren(node)
if node.else_clause and node.else_clause.is_terminator:
for clause in node.if_clauses:
if not clause.is_terminator:
break
else:
node.is_terminator = True
return node
def visit_TryExceptStatNode(self, node):
self.visitchildren(node)
if node.body.is_terminator and node.else_clause:
if self.current_directives['warn.unreachable']:
warning(node.else_clause.pos, "Unreachable code", 2)
node.else_clause = None
return node
class YieldNodeCollector(TreeVisitor):
def __init__(self):
super(YieldNodeCollector, self).__init__()
self.yields = []
self.returns = []
self.finallys = []
self.excepts = []
self.has_return_value = False
self.has_yield = False
self.has_await = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_YieldExprNode(self, node):
self.yields.append(node)
self.has_yield = True
self.visitchildren(node)
def visit_AwaitExprNode(self, node):
self.yields.append(node)
self.has_await = True
self.visitchildren(node)
def visit_ReturnStatNode(self, node):
self.visitchildren(node)
if node.value:
self.has_return_value = True
self.returns.append(node)
def visit_TryFinallyStatNode(self, node):
self.visitchildren(node)
self.finallys.append(node)
def visit_TryExceptStatNode(self, node):
self.visitchildren(node)
self.excepts.append(node)
def visit_ClassDefNode(self, node):
pass
def visit_FuncDefNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def visit_GeneratorExpressionNode(self, node):
pass
def visit_CArgDeclNode(self, node):
# do not look into annotations
# FIXME: support (yield) in default arguments (currently crashes)
pass
class MarkClosureVisitor(CythonTransform):
def visit_ModuleNode(self, node):
self.needs_closure = False
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
collector = YieldNodeCollector()
collector.visitchildren(node)
if node.is_async_def:
coroutine_type = Nodes.AsyncDefNode
if collector.has_yield:
coroutine_type = Nodes.AsyncGenNode
for yield_expr in collector.yields + collector.returns:
yield_expr.in_async_gen = True
elif self.current_directives['iterable_coroutine']:
coroutine_type = Nodes.IterableAsyncDefNode
elif collector.has_await:
found = next(y for y in collector.yields if y.is_await)
error(found.pos, "'await' not allowed in generators (use 'yield')")
return node
elif collector.has_yield:
coroutine_type = Nodes.GeneratorDefNode
else:
return node
for i, yield_expr in enumerate(collector.yields, 1):
yield_expr.label_num = i
for retnode in collector.returns + collector.finallys + collector.excepts:
retnode.in_generator = True
gbody = Nodes.GeneratorBodyDefNode(
pos=node.pos, name=node.name, body=node.body,
is_async_gen_body=node.is_async_def and collector.has_yield)
coroutine = coroutine_type(
pos=node.pos, name=node.name, args=node.args,
star_arg=node.star_arg, starstar_arg=node.starstar_arg,
doc=node.doc, decorators=node.decorators,
gbody=gbody, lambda_name=node.lambda_name)
return coroutine
def visit_CFuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
if node.needs_closure and node.overridable:
error(node.pos, "closures inside cpdef functions not yet supported")
return node
def visit_LambdaNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
def visit_ClassDefNode(self, node):
self.visitchildren(node)
self.needs_closure = True
return node
class CreateClosureClasses(CythonTransform):
# Output closure classes in module scope for all functions
# that really need it.
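# For illustration: in
#
#     def outer():
#         x = 1
#         def inner():
#             return x
#         return inner
#
# 'x' is moved into a generated closure scope class; 'inner' then reads it
# through the outer-scope object set up by create_class_from_scope() below.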
def __init__(self, context):
super(CreateClosureClasses, self).__init__(context)
self.path = []
self.in_lambda = False
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.visitchildren(node)
return node
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
for scope in node.local_scope.iter_local_scopes():
for name, entry in scope.entries.items():
if not name:
continue
if entry.from_closure:
from_closure.append((name, entry))
elif entry.in_closure:
in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
for scope in node.local_scope.iter_local_scopes():
for entry in scope.entries.values():
if not entry.from_closure:
entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
# Now from the beginning
node.needs_closure = False
node.needs_outer_scope = False
func_scope = node.local_scope
cscope = node.entry.scope
while cscope.is_py_class_scope or cscope.is_c_class_scope:
cscope = cscope.outer_scope
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_self_code = False
node.needs_outer_scope = False
if node.is_generator:
pass
elif not in_closure and not from_closure:
return
elif not in_closure:
func_scope.is_passthrough = True
func_scope.scope_class = cscope.scope_class
node.needs_outer_scope = True
return
as_name = '%s_%s' % (
target_module_scope.next_id(Naming.closure_class_prefix),
node.entry.cname)
entry = target_module_scope.declare_c_class(
name=as_name, pos=node.pos, defining=True,
implementing=True)
entry.type.is_final_type = True
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
class_scope.is_closure_class_scope = True
if node.is_async_def or node.is_generator:
# Generators need their closure intact during cleanup as they resume to handle GeneratorExit
class_scope.directives['no_gc_clear'] = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
if from_closure:
assert cscope.is_closure_scope
class_scope.declare_var(pos=node.pos,
name=Naming.outer_scope_cname,
cname=Naming.outer_scope_cname,
type=cscope.scope_class.type,
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
closure_entry = class_scope.declare_var(
pos=entry.pos,
name=entry.name if not entry.in_subscope else None,
cname=entry.cname,
type=entry.type,
is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
# Do it here because other classes are already checked
target_module_scope.check_c_class(func_scope.scope_class)
def visit_LambdaNode(self, node):
if not isinstance(node.def_node, Nodes.DefNode):
# fused function, an error has been previously issued
return node
was_in_lambda = self.in_lambda
self.in_lambda = True
self.create_class_from_scope(node.def_node, self.module_scope, node)
self.visitchildren(node)
self.in_lambda = was_in_lambda
return node
def visit_FuncDefNode(self, node):
if self.in_lambda:
self.visitchildren(node)
return node
if node.needs_closure or self.path:
self.create_class_from_scope(node, self.module_scope)
self.path.append(node)
self.visitchildren(node)
self.path.pop()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_CFuncDefNode(self, node):
if not node.overridable:
return self.visit_FuncDefNode(node)
else:
self.visitchildren(node)
return node
class GilCheck(VisitorTransform):
"""
Call `node.gil_check(env)` on each node to make sure we hold the
GIL when we need it. Raise an error on Python operations
inside a `nogil` environment.
Additionally, raise exceptions for closely nested with gil or with nogil
statements. The latter would abort Python.
"""
def __call__(self, root):
self.env_stack = [root.scope]
self.nogil = False
# True for 'cdef func() nogil:' functions, as the GIL may be held while
# calling this function (thus contained 'nogil' blocks may be valid).
self.nogil_declarator_only = False
return super(GilCheck, self).__call__(root)
def visit_FuncDefNode(self, node):
self.env_stack.append(node.local_scope)
was_nogil = self.nogil
self.nogil = node.local_scope.nogil
if self.nogil:
self.nogil_declarator_only = True
if self.nogil and node.nogil_check:
node.nogil_check(node.local_scope)
self.visitchildren(node)
# This cannot be nested, so it doesn't need backup/restore
self.nogil_declarator_only = False
self.env_stack.pop()
self.nogil = was_nogil
return node
def visit_GILStatNode(self, node):
if self.nogil and node.nogil_check:
node.nogil_check()
was_nogil = self.nogil
self.nogil = (node.state == 'nogil')
if was_nogil == self.nogil and not self.nogil_declarator_only:
if not was_nogil:
error(node.pos, "Trying to acquire the GIL while it is "
"already held.")
else:
error(node.pos, "Trying to release the GIL while it was "
"previously released.")
if isinstance(node.finally_clause, Nodes.StatListNode):
# The finally clause of the GILStatNode is a GILExitNode,
# which is wrapped in a StatListNode. Just unpack that.
node.finally_clause, = node.finally_clause.stats
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_ParallelRangeNode(self, node):
if node.nogil:
node.nogil = False
node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
return self.visit_GILStatNode(node)
if not self.nogil:
error(node.pos, "prange() can only be used without the GIL")
# Forget about any GIL-related errors that may occur in the body
return None
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_ParallelWithBlockNode(self, node):
if not self.nogil:
error(node.pos, "The parallel section may only be used without "
"the GIL")
return None
if node.nogil_check:
# It does not currently implement this, but test for it anyway to
# avoid potential future surprises
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_TryFinallyStatNode(self, node):
"""
Take care of try/finally statements in nogil code sections.
"""
if not self.nogil or isinstance(node, Nodes.GILStatNode):
return self.visit_Node(node)
node.nogil_check = None
node.is_try_finally_in_nogil = True
self.visitchildren(node)
return node
def visit_Node(self, node):
if self.env_stack and self.nogil and node.nogil_check:
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
node.in_nogil_context = self.nogil
return node
class TransformBuiltinMethods(EnvTransform):
"""
Replace Cython's own cython.* builtins by the corresponding tree nodes.
"""
def visit_SingleAssignmentNode(self, node):
if node.declaration_only:
return None
else:
self.visitchildren(node)
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
return self.visit_cython_attribute(node)
def visit_NameNode(self, node):
return self.visit_cython_attribute(node)
def visit_cython_attribute(self, node):
attribute = node.as_cython_attribute()
if attribute:
if attribute == u'compiled':
node = ExprNodes.BoolNode(node.pos, value=True)
elif attribute == u'__version__':
from .. import __version__ as version
node = ExprNodes.StringNode(node.pos, value=EncodedString(version))
elif attribute == u'NULL':
node = ExprNodes.NullNode(node.pos)
elif attribute in (u'set', u'frozenset', u'staticmethod'):
node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
entry=self.current_env().builtin_scope().lookup_here(attribute))
elif PyrexTypes.parse_basic_type(attribute):
pass
elif self.context.cython_scope.lookup_qualified_name(attribute):
pass
else:
error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
return node
def visit_ExecStatNode(self, node):
lenv = self.current_env()
self.visitchildren(node)
if len(node.args) == 1:
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_locals(self, node, func_name):
# locals()/dir()/vars() builtins
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry:
# not the builtin
return node
pos = node.pos
if func_name in ('locals', 'vars'):
if func_name == 'locals' and len(node.args) > 0:
error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
% len(node.args))
return node
elif func_name == 'vars':
if len(node.args) > 1:
error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
return node # nothing to do
return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
else: # dir()
if len(node.args) > 1:
error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
# optimised in Builtin.py
return node
if lenv.is_py_class_scope or lenv.is_module_scope:
if lenv.is_py_class_scope:
pyclass = self.current_scope_node()
locals_dict = ExprNodes.CloneNode(pyclass.dict)
else:
locals_dict = ExprNodes.GlobalsExprNode(pos)
return ExprNodes.SortedDictKeysNode(locals_dict)
local_names = sorted(var.name for var in lenv.entries.values() if var.name)
items = [ExprNodes.IdentifierStringNode(pos, value=var)
for var in local_names]
return ExprNodes.ListNode(pos, args=items)
def visit_PrimaryCmpNode(self, node):
# special case: for in/not-in test, we do not need to sort locals()
self.visitchildren(node)
if node.operator in 'not_in': # in/not_in
if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
arg = node.operand2.arg
if isinstance(arg, ExprNodes.NoneCheckNode):
arg = arg.arg
node.operand2 = arg
return node
def visit_CascadedCmpNode(self, node):
return self.visit_PrimaryCmpNode(node)
def _inject_eval(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or len(node.args) != 1:
return node
# Inject globals and locals
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_super(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or node.args:
return node
# Inject no-args super
def_node = self.current_scope_node()
if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
len(self.env_stack) < 2):
return node
class_node, class_scope = self.env_stack[-2]
if class_scope.is_py_class_scope:
def_node.requires_classobj = True
class_node.class_cell.is_active = True
node.args = [
ExprNodes.ClassCellNode(
node.pos, is_generator=def_node.is_generator),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
elif class_scope.is_c_class_scope:
node.args = [
ExprNodes.NameNode(
node.pos, name=class_node.scope.name,
entry=class_node.entry),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
return node
def visit_SimpleCallNode(self, node):
# cython.foo
function = node.function.as_cython_attribute()
if function:
if function in InterpretCompilerDirectives.unop_method_nodes:
if len(node.args) != 1:
error(node.function.pos, u"%s() takes exactly one argument" % function)
else:
node = InterpretCompilerDirectives.unop_method_nodes[function](
node.function.pos, operand=node.args[0])
elif function in InterpretCompilerDirectives.binop_method_nodes:
if len(node.args) != 2:
error(node.function.pos, u"%s() takes exactly two arguments" % function)
else:
node = InterpretCompilerDirectives.binop_method_nodes[function](
node.function.pos, operand1=node.args[0], operand2=node.args[1])
elif function == u'cast':
if len(node.args) != 2:
error(node.function.pos,
u"cast() takes exactly two arguments and an optional typecheck keyword")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.TypecastNode(
node.function.pos, type=type, operand=node.args[1], typecheck=False)
else:
error(node.args[0].pos, "Not a type")
elif function == u'sizeof':
if len(node.args) != 1:
error(node.function.pos, u"sizeof() takes exactly one argument")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
else:
node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
elif function == 'cmod':
if len(node.args) != 2:
error(node.function.pos, u"cmod() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
node.cdivision = True
elif function == 'cdiv':
if len(node.args) != 2:
error(node.function.pos, u"cdiv() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
node.cdivision = True
elif function == u'set':
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
elif function == u'staticmethod':
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod'))
elif self.context.cython_scope.lookup_qualified_name(function):
pass
else:
error(node.function.pos,
u"'%s' not a valid cython language construct" % function)
self.visitchildren(node)
if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
func_name = node.function.name
if func_name in ('dir', 'locals', 'vars'):
return self._inject_locals(node, func_name)
if func_name == 'eval':
return self._inject_eval(node, func_name)
if func_name == 'super':
return self._inject_super(node, func_name)
return node
def visit_GeneralCallNode(self, node):
function = node.function.as_cython_attribute()
if function == u'cast':
# NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args
args = node.positional_args.args
kwargs = node.keyword_args.compile_time_value(None)
if (len(args) != 2 or len(kwargs) > 1 or
(len(kwargs) == 1 and 'typecheck' not in kwargs)):
error(node.function.pos,
u"cast() takes exactly two arguments and an optional typecheck keyword")
else:
type = args[0].analyse_as_type(self.current_env())
if type:
typecheck = kwargs.get('typecheck', False)
node = ExprNodes.TypecastNode(
node.function.pos, type=type, operand=args[1], typecheck=typecheck)
else:
error(args[0].pos, "Not a type")
self.visitchildren(node)
return node
class ReplaceFusedTypeChecks(VisitorTransform):
"""
This is not a transform in the pipeline. It is invoked on the specific
versions of a cdef function with fused argument types. It filters out any
type branches that don't match. e.g.
if fused_t is mytype:
...
elif fused_t in other_fused_type:
...
"""
def __init__(self, local_scope):
super(ReplaceFusedTypeChecks, self).__init__()
self.local_scope = local_scope
# defer the import until now to avoid circular import time dependencies
from .Optimize import ConstantFolding
self.transform = ConstantFolding(reevaluate=True)
def visit_IfStatNode(self, node):
"""
Filters out any if clauses with false compile time type check
expression.
"""
self.visitchildren(node)
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
with Errors.local_errors(ignore=True):
type1 = node.operand1.analyse_as_type(self.local_scope)
type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
true_node = ExprNodes.BoolNode(node.pos, value=True)
type1 = self.specialize_type(type1, node.operand1.pos)
op = node.operator
if op in ('is', 'is_not', '==', '!='):
type2 = self.specialize_type(type2, node.operand2.pos)
is_same = type1.same_as(type2)
eq = op in ('is', '==')
if (is_same and eq) or (not is_same and not eq):
return true_node
elif op in ('in', 'not_in'):
# We have to do an instance check directly, as operand2
# needs to be a fused type and not a type with a subtype
# that is fused. First unpack the typedef
if isinstance(type2, PyrexTypes.CTypedefType):
type2 = type2.typedef_base_type
if type1.is_fused:
error(node.operand1.pos, "Type is fused")
elif not type2.is_fused:
error(node.operand2.pos,
"Can only use 'in' or 'not in' on a fused type")
else:
types = PyrexTypes.get_specialized_types(type2)
for specialized_type in types:
if type1.same_as(specialized_type):
if op == 'in':
return true_node
else:
return false_node
if op == 'not_in':
return true_node
return false_node
return node
def specialize_type(self, type, pos):
try:
return type.specialize(self.local_scope.fused_to_specific)
except KeyError:
error(pos, "Type is not specific")
return type
def visit_Node(self, node):
self.visitchildren(node)
return node
class DebugTransform(CythonTransform):
"""
Write debug information for this Cython module.
"""
def __init__(self, context, options, result):
super(DebugTransform, self).__init__(context)
self.visited = set()
# our treebuilder and debug output writer
# (see Cython.Debugger.debug_output.CythonDebugWriter)
self.tb = self.context.gdb_debug_outputwriter
#self.c_output_file = options.output_file
self.c_output_file = result.c_file
# Closure support, basically treat nested functions as if the AST were
# never nested
self.nested_funcdefs = []
# tells visit_NameNode whether it should register step-into functions
self.register_stepinto = False
def visit_ModuleNode(self, node):
self.tb.module_name = node.full_module_name
attrs = dict(
module_name=node.full_module_name,
filename=node.pos[0].filename,
c_filename=self.c_output_file)
self.tb.start('Module', attrs)
# serialize functions
self.tb.start('Functions')
# First, serialize functions normally...
self.visitchildren(node)
# ... then, serialize nested functions
for nested_funcdef in self.nested_funcdefs:
self.visit_FuncDefNode(nested_funcdef)
self.register_stepinto = True
self.serialize_modulenode_as_function(node)
self.register_stepinto = False
self.tb.end('Functions')
# 2.3 compatibility. Serialize global variables
self.tb.start('Globals')
entries = {}
for k, v in node.scope.entries.items():
if (v.qualified_name not in self.visited and not
v.name.startswith('__pyx_') and not
v.type.is_cfunction and not
v.type.is_extension_type):
entries[k]= v
self.serialize_local_variables(entries)
self.tb.end('Globals')
# self.tb.end('Module') # end Module after the line number mapping in
# Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
return node
def visit_FuncDefNode(self, node):
self.visited.add(node.local_scope.qualified_name)
if getattr(node, 'is_wrapper', False):
return node
if self.register_stepinto:
self.nested_funcdefs.append(node)
return node
# node.entry.visibility = 'extern'
if node.py_func is None:
pf_cname = ''
else:
pf_cname = node.py_func.entry.func_cname
attrs = dict(
name=node.entry.name or getattr(node, 'name', '<unknown>'),
cname=node.entry.func_cname,
pf_cname=pf_cname,
qualified_name=node.local_scope.qualified_name,
lineno=str(node.pos[1]))
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.local_scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
for arg in node.local_scope.arg_entries:
self.tb.start(arg.name)
self.tb.end(arg.name)
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
return node
def visit_NameNode(self, node):
if (self.register_stepinto and
node.type is not None and
node.type.is_cfunction and
getattr(node, 'is_called', False) and
node.entry.func_cname is not None):
# don't check node.entry.in_cinclude, as 'cdef extern: ...'
# declared functions are not 'in_cinclude'.
# This means we will list called 'cdef' functions as
# "step into functions", but this is not an issue as they will be
# recognized as Cython functions anyway.
attrs = dict(name=node.entry.func_cname)
self.tb.start('StepIntoFunction', attrs=attrs)
self.tb.end('StepIntoFunction')
self.visitchildren(node)
return node
def serialize_modulenode_as_function(self, node):
"""
Serialize the module-level code as a function so the debugger will know
it's a "relevant frame" and it will know where to set the breakpoint
for 'break modulename'.
"""
name = node.full_module_name.rpartition('.')[-1]
cname_py2 = 'init' + name
cname_py3 = 'PyInit_' + name
py2_attrs = dict(
name=name,
cname=cname_py2,
pf_cname='',
# Ignore the qualified_name, breakpoints should be set using
# `cy break modulename:lineno` for module-level breakpoints.
qualified_name='',
lineno='1',
is_initmodule_function="True",
)
py3_attrs = dict(py2_attrs, cname=cname_py3)
self._serialize_modulenode_as_function(node, py2_attrs)
self._serialize_modulenode_as_function(node, py3_attrs)
def _serialize_modulenode_as_function(self, node, attrs):
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
def serialize_local_variables(self, entries):
for entry in entries.values():
if not entry.cname:
# not a local variable
continue
if entry.type.is_pyobject:
vartype = 'PythonObject'
else:
vartype = 'CObject'
if entry.from_closure:
# We're dealing with a closure where a variable from an outer
# scope is accessed, get it from the scope object.
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.outer_entry.cname)
qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
entry.scope.name,
entry.name)
elif entry.in_closure:
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.cname)
qname = entry.qualified_name
else:
cname = entry.cname
qname = entry.qualified_name
if not entry.pos:
# this happens for variables that are not in the user's code,
# e.g. for the global __builtins__, __doc__, etc. We can just
# set the lineno to 0 for those.
lineno = '0'
else:
lineno = str(entry.pos[1])
attrs = dict(
name=entry.name,
cname=cname,
qualified_name=qname,
type=vartype,
lineno=lineno)
self.tb.start('LocalVar', attrs)
self.tb.end('LocalVar')
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/Cython/Compiler/ParseTreeTransforms.py
|
Python
|
mit
| 135,741
|
[
"VisIt"
] |
c664f658d6084933699e18edc23af95013c7ebae4d9e9f55604d269c8ad1663a
|
import mayavi.mlab as mlab
from pylayers.gis.layout import *
from pylayers.simul.link import *
L = Layout('Luebbers.ini')
#L.showG('st',aw=True,labels=True,nodelist=L.ldiffout)
#f,lax= plt.subplots(2,2)
#L.showG('s',aw=True,labels=True,fig=f,ax=lax[0][0])
#lax[0][0].set_title('Gs',fontsize=18)
#L.showG('st',aw=True,labels=True,fig=f,ax=lax[0][1])
#lax[0][1].set_title('Gt',fontsize=18)
#L.showG('v',aw=True,labels=True,fig=f,ax=lax[1][0])
#lax[1][0].set_title('Gv',fontsize=18)
#L.showG('i',aw=True,labels=True,fig=f,ax=lax[1][1])
#lax[1][1].set_title('Gi',fontsize=18)
#
fGHz=np.arange(0.5,1,0.01)
DL = DLink(L=L,fGHz=fGHz)
DL.Aa = Antenna('Omni',fGHz=fGHz)
DL.Ab = Antenna('Omni',fGHz=fGHz)
DL.a = np.array([37.5,6.2,1.5])
DL.b = np.array([12.5,30,1.5])
DL.eval(force=True,cutoff=4,verbose=False,ra_ceil_H=0)
#DL.R.show(L=L)
|
buguen/pylayers
|
pylayers/gis/test/test_luebbers.py
|
Python
|
lgpl-3.0
| 830
|
[
"Mayavi"
] |
564090264cd55c02c799b55ba5e4fb808ccaadefc3117422cbff1a51206cf408
|
"""
Unit tests for the JPER API class
"""
from octopus.modules.es.testindex import ESTestCase
from octopus.lib import http, paths
from octopus.core import app
from service.tests import fixtures
from service import api, models
from octopus.modules.store import store
import os
class MockResponse(object):
def __init__(self, status_code):
self.status_code = status_code
def mock_get_stream(*args, **kwargs):
# http://example.com/pub/1/file.pdf
# resp, content, size = http.get_stream(url, cut_off=100, chunk_size=100)
if args[0] == "http://example.com/pub/1/file.pdf":
return MockResponse(200), "a bunch of text", 5000
def get_stream_fail(*args, **kwargs):
return None, "", 0
def get_stream_status(*args, **kwargs):
return MockResponse(401), "", 6000
def get_stream_empty(*args, **kwargs):
return MockResponse(200), "", 0
class TestAPI(ESTestCase):
def setUp(self):
# need to do this first, before kicking upstairs, as ESTestCase runs initialise
self.run_schedule = app.config.get("RUN_SCHEDULE")
app.config["RUN_SCHEDULE"] = False
self.store_impl = app.config.get("STORE_IMPL")
app.config["STORE_IMPL"] = "octopus.modules.store.store.TempStore"
# now call the superclass, which will init the app
super(TestAPI, self).setUp()
self.old_get_stream = http.get_stream
self.custom_zip_path = paths.rel2abs(__file__, "..", "resources", "custom.zip")
self.stored_ids = []
def tearDown(self):
super(TestAPI, self).tearDown()
http.get_stream = self.old_get_stream
app.config["STORE_IMPL"] = self.store_impl
app.config["RUN_SCHEDULE"] = self.run_schedule
if os.path.exists(self.custom_zip_path):
os.remove(self.custom_zip_path)
s = store.StoreFactory.get()
for id in self.stored_ids:
s.delete(id)
def test_01_validate(self):
# 3 different kinds of validation required
acc = models.Account()
acc.id = "12345"
# 1. Validation of plain metadata-only notification
notification = fixtures.APIFactory.incoming()
del notification["links"]
api.JPER.validate(acc, notification)
# 2. Validation of metadata-only notification with external file links
http.get_stream = mock_get_stream
notification = fixtures.APIFactory.incoming()
api.JPER.validate(acc, notification)
# 3. Validation of metadata + zip content
notification = fixtures.APIFactory.incoming()
del notification["links"]
filepath = fixtures.PackageFactory.example_package_path()
with open(filepath) as f:
api.JPER.validate(acc, notification, f)
def test_02_validate_md_only_fail(self):
acc = models.Account()
acc.id = "12345"
# 1. JSON is invalid structure
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, {"random" : "content"})
# 2. No match data present
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, {})
def test_03_validate_md_links_fail(self):
acc = models.Account()
acc.id = "12345"
# 3. No url provided
notification = fixtures.APIFactory.incoming()
del notification["links"][0]["url"]
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification)
# 4. HTTP connection failure
notification = fixtures.APIFactory.incoming()
http.get_stream = get_stream_fail
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification)
# 5. Incorrect status code
notification = fixtures.APIFactory.incoming()
http.get_stream = get_stream_status
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification)
# 6. Empty content
notification = fixtures.APIFactory.incoming()
http.get_stream = get_stream_empty
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification)
def test_04_validate_md_content_fail(self):
acc = models.Account()
acc.id = "12345"
# 7. No format supplied
notification = fixtures.APIFactory.incoming()
del notification["links"]
del notification["content"]
path = fixtures.PackageFactory.example_package_path()
with open(path) as f:
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification, f)
# 8. Incorrect format supplied
notification = fixtures.APIFactory.incoming()
del notification["links"]
notification["content"]["packaging_format"] = "http://some.random.url"
path = fixtures.PackageFactory.example_package_path()
with open(path) as f:
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification, f)
# 9. Package invalid/corrupt
notification = fixtures.APIFactory.incoming()
del notification["links"]
fixtures.PackageFactory.make_custom_zip(self.custom_zip_path, corrupt_zip=True)
with open(self.custom_zip_path) as f:
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, notification, f)
# 10. No match data in either md or package
fixtures.PackageFactory.make_custom_zip(self.custom_zip_path, no_jats=True, no_epmc=True)
with open(self.custom_zip_path) as f:
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc, {}, f)
def test_05_create(self):
# 2 different kinds of create mechanism
# make some repository accounts that we'll be doing the test as
acc1 = models.Account()
acc1.add_role('publisher')
acc1.save()
# 1. Creation of plain metadata-only notification (with links that aren't checked)
notification = fixtures.APIFactory.incoming()
note = api.JPER.create_notification(acc1, notification)
assert note is not None
assert note.id is not None
check = models.UnroutedNotification.pull(note.id)
assert check is not None
assert len(check.links) == 1
assert check.links[0]["url"] == "http://example.com/pub/1/file.pdf"
assert check.provider_id == acc1.id
# 2. Creation of metadata + zip content
notification = fixtures.APIFactory.incoming()
del notification["links"]
filepath = fixtures.PackageFactory.example_package_path()
with open(filepath) as f:
note = api.JPER.create_notification(acc1, notification, f)
self.stored_ids.append(note.id)
assert note is not None
assert note.id is not None
check = models.UnroutedNotification.pull(note.id)
assert check is not None
assert len(check.links) == 1
assert check.links[0]["url"].endswith("notification/" + note.id + "/content")
assert check.links[0]["packaging"].endswith("FilesAndJATS")
assert check.provider_id == acc1.id
s = store.StoreFactory.get()
stored = s.list(note.id)
assert len(stored) == 3
def test_06_create_fail(self):
# There are only 2 circumstances under which the notification will fail
# make some repository accounts that we'll be doing the test as
acc1 = models.Account()
acc1.add_role('publisher')
acc1.save()
# 1. Invalid notification metadata
with self.assertRaises(api.ValidationException):
note = api.JPER.create_notification(acc1, {"random" : "content"})
# 2. Corrupt zip file
notification = fixtures.APIFactory.incoming()
fixtures.PackageFactory.make_custom_zip(self.custom_zip_path, corrupt_zip=True)
with open(self.custom_zip_path) as f:
with self.assertRaises(api.ValidationException):
api.JPER.validate(acc1, notification, f)
|
JiscPER/jper
|
service/tests/unit/test_api.py
|
Python
|
apache-2.0
| 8,162
|
[
"Octopus"
] |
c75f88626cbf8a81b6e9e267053de0724fc7e24bfb7e695a8330a6e46bcfac79
|
#!/usr/bin/env python
import sys
import re
import os.path
import numpy as N
import sgmllib
from pprint import pprint
PTG_NUMS=[1, 2, 3, 6, 10, 16, 25, 47, 75, 81, 83, 89, 99, 111,
123, 143, 147, 149, 156, 162, 168, 174, 175, 177, 183, 189, 191, 195, 200, 207, 215, 221]
ptg_names = [
["C1" , "1"],
["Ci" , "-1"],
["C2" , "2"],
["Cs" , "m"],
["C2h" , "2/m"],
["D2" , "222"],
["C2v" , "mm2"],
["D2h" , "mmm"],
["C4" , "4"],
["S4" , "-4"],
["C4h" , "4/m"],
["D4" , "422"],
["C4v" , "4mm"],
["D2d" , "-42m"],
["D4h" , "4/mmm"],
["C3" , "3"],
["C3i" , "-3"],
["D3" , "32"],
["C3v" , "3m"],
["D3d" , "-3m"],
["C6" , "6"],
["C3h" , "-6"],
["C6h" , "6/m"],
["D6" , "622"],
["C6v" , "6mm"],
["D3h" , "-62m"],
["D6h" , "6/mmm"],
["T" , "23"],
["Th" , "m-3"],
["O" , "432"],
["Td" , "-43m"],
["Oh" , "m-3m"],
]
_E3D = N.identity(3, N.int)
_03D = N.zeros((3,3), N.int)
def dotc(v1, v2):
"""Scalar product between two complex vectors"""
return N.dot(N.conjugate(v1), v2)
def rflat(iterables):
"""Iterator over all elements of a nested iterable. It's recursive!"""
for item in iterables:
if not hasattr(item, "__iter__"):
yield item
else: # iterable object.
for it in rflat(item): yield it
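# Illustrative sketch (not part of the original module): rflat walks nested
# iterables depth-first and yields the leaves, e.g.
#   >>> list(rflat([1, [2, 3], [[4], 5]]))
#   [1, 2, 3, 4, 5]
# Strings count as leaves under Python 2 because they lack __iter__.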
def npymat2Fmat(npymat):
"""Return a string with the F90 declaration of a numpy matrix."""
shape = str(npymat.shape)
shape = shape.replace("(","(/").replace(")","/)")
fmat = npymat.T # From C to F ordering.
vect = ""
for idx, ele in enumerate(fmat.flat):
#print ele
tk = str(ele)
tk = tk.replace("+-","-") # Have to Fix weird output of str when imag part is negative (+-)
tk = tk.replace("++","+") # Fortran compilers will likely complain!
tk = tk.replace("(","")
tk = tk.replace(")","")
vect += tk
if idx != (fmat.size-1): vect += ", "
vect = vect.replace("j","*j") # For complex numbers, j has to be defined in F source.
vect = "(/" + vect + "/)"
return " RESHAPE( %(vect)s, %(shape)s )" % locals()
#########################################################################################
class RotationException(Exception):
pass
class Rotation(object):
"""Object describing a pure rotation (proper, improper, mirror symmetry"""
def __init__(self, rotation, order=None, trcoords=None, versor=None):
self.rotation = N.matrix(rotation, N.int)
self.order = order
self.trcoords = trcoords
if versor is None:
self.versor = [0,0,0]
else:
self.versor = versor
dd = {0:"x",1:"y",2:"z"}
self.my_trcoords = ""
for ridx, row in enumerate(self.rotation):
for cidx, el in enumerate(row.flat):
ax = dd[cidx]
if el == -1: self.my_trcoords += "-" + ax
elif el == 0: pass
elif el == +1: self.my_trcoords += "+" + ax
else:
raise RotationException("wrong element value" + str(el))
if ridx < 2: self.my_trcoords += ", "
# Might subclass N.matrix though.
def __eq__(self, other):
return N.allclose(self.rotation, other.rotation)
def __neq__(self, other):
return not self == other
# Implement the unary arithmetic operations (+, -)
def __pos__(self): return self
def __neg__(self): return Rotation(-self.rotation)
def __mul__(self, other):
return Rotation(self.rotation * other.rotation)
def __pow__(self, intexp, modulo=1):
if intexp == 0: return Rotation(_E3D)
if intexp > 0: return Rotation(self.rotation**intexp)
if intexp == -1: return self.invert()
if intexp < 0: return self.__pow__(-intexp).invert()
def _get_det(self):
"""Return the determinant of a symmetry matrix mat[3,3]. It must be +-1"""
mat = self.rotation
det = mat[0,0]* ( mat[1,1]*mat[2,2] - mat[1,2]*mat[2,1] )\
- mat[0,1]* ( mat[1,0]*mat[2,2] - mat[1,2]*mat[2,0] )\
+ mat[0,2]* ( mat[1,0]*mat[2,1] - mat[1,1]*mat[2,0] )
if abs(det) != 1:
raise RotationException("abs(det) must be 1 while it is " + str(abs(det)))
else:
return det
det = property(_get_det, doc="The determinant of the rotation")
def _get_trace(self):
return self.rotation.trace()[0,0]
trace = property(_get_trace, doc="The trace of the rotation")
def _isproper(self):
return bool(self.det+1)
isproper = property(_isproper, doc="True if proper rotation")
def invert(self):
"""
Invert an orthogonal 3x3 matrix of INTEGER elements.
Note use of integer arithmetic. Raise RotationException if not invertible.
"""
det = self.det
mm = self.rotation
inv= N.matrix(N.zeros((3,3), N.int))
inv[0,0] = mm[1,1] * mm[2,2] - mm[1,2] * mm[2,1]
inv[0,1] = mm[0,2] * mm[2,1] - mm[0,1] * mm[2,2]
inv[0,2] = mm[0,1] * mm[1,2] - mm[0,2] * mm[1,1]
inv[1,0] = mm[1,2] * mm[2,0] - mm[1,0] * mm[2,2]
inv[1,1] = mm[0,0] * mm[2,2] - mm[0,2] * mm[2,0]
inv[1,2] = mm[0,2] * mm[1,0] - mm[0,0] * mm[1,2]
inv[2,0] = mm[1,0] * mm[2,1] - mm[1,1] * mm[2,0]
inv[2,1] = mm[0,1] * mm[2,0] - mm[0,0] * mm[2,1]
inv[2,2] = mm[0,0] * mm[1,1] - mm[0,1] * mm[1,0]
# Make sure matrix is not singular
if det != 0:
return Rotation(inv/det)
else:
raise RotationException("Attempting to invert singular matrix")
def _rottype(self):
"""
Receives a 3x3 orthogonal matrix and reports its type:
1 Identity
2 Inversion
3 Proper rotation of an angle <> 180 degrees
4 Proper rotation of 180 degrees
5 Mirror symmetry
6 Improper rotation
"""
rot = self.rotation # Just an alias.
# Treat identity and inversion first
#identity = Rotation(_E3D)
if self.isE: return 1
if self.isI: return 2
if self.isproper: # Proper rotation
t = 3 # try angle != 180
# A 180 degree rotation has an eigenvalue -1, hence det(rot + E) == 0.
# Use numpy directly since the matrix sum is not a valid Rotation object.
if int(round(N.linalg.det(rot + _E3D))) == 0: t = 4 # 180 rotation
else: # Mirror symmetry or Improper rotation
t = 6
# A mirror symmetry has an eigenvalue +1, hence det(rot - E) == 0.
if int(round(N.linalg.det(rot - _E3D))) == 0: t = 5 # Mirror symmetry if an eigenvalue is 1
return t
def _isE(self):
return N.allclose(self.rotation, _E3D)
isE = property(_isE, doc="True if it is the identity")
def _isI(self):
return N.allclose(self.rotation, -_E3D)
isI = property(_isI, doc="True if it is the inversion")
def get_versor(self):
raise NotImplementedError
if self.isE or self.isI:
versor = [0, 0, 0]
return versor
def _get_order(self):
order = None
root_invers = 0
for ior in range(1,7):
rn = self**ior
if rn.isE:
order = ior
break
if rn.isI: root_invers = ior
if order is None:
raise RotationException("symmetry is not a root of unit!")
return (order, root_invers)
info = property(_get_order, doc="Order and root of unit")
def _get_name(self):
order, root_invers = self.info
name = ""
if self.det == -1: name = "-"
name += str(order) # FIXME this one doesn't work yet.
if root_invers != 0:
name += "-"
else:
name += "+"
return name
name = property(_get_name, doc="")
def __str__(self):
string = "Rotation: " + str(self.order) + ", versor: " + str(self.versor) + ", " + str(self.trcoords) + "\n"
string += str(self.rotation)
return string
def toFortran(self, varname):
"""Return a string with the F90 declaration of the symmetry."""
fmat = self.rotation.T # From C to F indexing
string = "RESHAPE(("
vect = str([e for e in fmat.flat])
vect = vect.replace("[","(/")
vect = vect.replace("]","/)")
return " %(varname)s = RESHAPE( %(vect)s ,(/3,3/) )" % locals()
#########################################################################################
class IrreducibleRepr(object):
"""Class defining an irreducible representation"""
def __init__(self, name, dim, matrices):
self.name = name
self.dim = dim
self.matrices = matrices
#self.matrices = [ N.matrix(mat) for mat in matrices ]
def __str__(self):
string = " Irred repr: " + self.name + " dimension= " + str(self.dim) + "\n"
for mat in self.matrices: string += str(mat) + "\n"
return string
def traces(self):
return N.array( [ mat.trace()[0,0] for mat in self.matrices ] )
def mk_classes(rotations):
"""
Find the classes of a group. Return a list containing the indices
of the operations in each class sorted in ascending order.
A class is defined as the set of distinct items obtained by
considering for each element, S, of the group all its conjugate
X^-1 S X where X is one of the elements of the group.
"""
class_ids = list()
seen = [ 0 for i in range(len(rotations)) ]
nclass = -1
for idx, m in enumerate(rotations):
if seen[idx]: continue
seen[idx] = 1
nclass += 1
class_ids.append([])
class_ids[nclass].append(idx)
for x in rotations:
new = x.invert() * m * x
idx_found = -1
for idx_search, search in enumerate(rotations):
if search == new:
idx_found = idx_search
break
if (idx_found == -1): sys.stderr.write("idx_found == -1")
if not seen[idx_found]:
seen[idx_found] = 1
class_ids[nclass].append(idx_found)
# Now sort the indices.
sort_class_ids = list()
for ids in class_ids:
ids.sort()
sort_class_ids.append(ids)
return sort_class_ids
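# Minimal sketch of the expected grouping (illustrative only): the two-element
# group {E, I} splits into two singleton conjugacy classes,
#   >>> mk_classes([Rotation(_E3D), Rotation(-_E3D)])
#   [[0], [1]]
# since X^-1 S X == S whenever S commutes with every X in the group.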
#########################################################################################
class PointGroupException(Exception):
pass
class NotAGroup(PointGroupException):
pass
class RotationNotFound(PointGroupException):
pass
class PointGroup(list):
"""A PointGroup is a list of Rotations and it has irreducible representations"""
def __init__(self, rotations, name=None, irreprs=None):
class_ids = mk_classes(rotations)
# Always reorder rotations and irreprs according to class indices.
ord_rotations = [ None for ii in range(len(rotations)) ]
idx = -1
for ord_idx in rflat(class_ids):
idx += 1
ord_rotations[idx] = rotations[ord_idx]
ord_irreprs = list()
for irr in irreprs:
ord_matrices = [ None for ii in range(len(irr.matrices)) ]
idx = -1
for ord_idx in rflat(class_ids):
idx += 1
ord_matrices[idx] = irr.matrices[ord_idx]
ord_irreprs.append( IrreducibleRepr(irr.name, irr.dim, ord_matrices) )
list.__init__(self)
for orot in ord_rotations: self.append(orot)
if __debug__ and False:
for rot in self:
print "info", rot.det, rot.info
print "name", rot.name
print "order", rot.order
self.class_ids = mk_classes(ord_rotations)
self.nclass = len(self.class_ids)
# Create name of each class.
#self.class_names = [ "None" for ii in range(self.nclass) ]
first_rot_ids = [ self.class_ids[ii][0] for ii in range(self.nclass) ]
self.class_names = [ self[ii].name for ii in first_rot_ids ]
self.nsym = len(self)
self.name = str(name)
self.irreprs = ord_irreprs
#for ii in self.irreprs: print ii
self.nirrepr = len(self.irreprs)
def find(self, rot):
"""Return the index of rot."""
try:
return self.index(rot)
except ValueError:
raise RotationNotFound(rot)
def findE(self):
"""Return the index of the identity."""
try:
return self.index(Rotation(_E3D))
except RotationNotFound:
raise
def find_inverse(self, rot):
"""Return the index of the inverse of rot."""
E = Rotation(_E3D)
for s in self:
if s * rot == E: return s
sys.stderr.write("Warning: Inverse not found!!\n")
raise RotationNotFound(rot)
def isgroup(self):
try:
self.findE()
for rot in self: self.find_inverse(rot)
return True
except RotationNotFound:
sys.stderr.write("Not a group! Identity or inverse are missing")
return False
def mk_mtable(self):
"""Check if it is a group, then build the multiplication table"""
# Check if r1 * r2 is in group and build the multiplication table.
mtable = dict()
for idx1, r1 in enumerate(self):
for idx2, r2 in enumerate(self):
try:
ij = (idx1, idx2)
mtable[ij] = self.index(r1 * r2)
except RotationNotFound:
sys.stderr.write("Not a group. Not close wrt *")
raise
return mtable
def show_mtable(self):
"""Print out multiplication table."""
mtable = self.mk_mtable()
print 4*" " + (2*" ").join([str(i) for i in xrange(self.nsym)]) + "\n"
for i in xrange(self.nsym):
lij = [(i, j) for j in xrange(self.nsym)]
print str(i) + (2*" ").join([str(mtable[ij]) for ij in lij]) + "\n"
def show_character_table(self):
vlen = 10
print 100*"*"
print ("Point Group" + self.name)
cln = ""
for clname in self.class_names:
cln += str(clname).center(vlen)
print "Class" + cln
mult = "Mult"
for cls in self.class_ids:
mult += str(len(cls)).center(vlen)
print mult
for irrepr in self.irreprs:
#print "irrepr ", irrepr
row = irrepr.name.ljust(5)
for icls in range(self.nclass):
sym_id = self.class_ids[icls][0]
mat = irrepr.matrices[sym_id]
char = mat.trace()[0,0]
row += str(char).center(vlen)
print row
print 100*"*"
print 100*"*"
def check(self):
if not self.isgroup(): raise NotAGroup
class_ids = mk_classes(self)
#print class_ids
check = -1
for idx in rflat(class_ids):
check = check +1
if check!= idx: raise PointGroupException("Symmetries are not ordered by classes")
mtable = self.mk_mtable()
err = 0.0
for idx1 in range(len(self)):
for idx2 in range(len(self)):
ij = (idx1, idx2)
idx_prod = mtable[ij]
for irr in self.irreprs:
mat_prod = irr.matrices[idx1] * irr.matrices[idx2]
my_err = (mat_prod - irr.matrices[idx_prod]).max()
err = max(err, abs(my_err))
print "Error in Group Representation", err
character_of = dict()
for irr in self.irreprs:
traces = irr.traces()
#character = [ traces[ii]
chr = list()
for clids in self.class_ids:
idx = clids[0]
chr.append(traces[idx])
#character_of[irr.name] = N.array(chr)
character_of[irr.name] = traces
#irr.name
err_otrace = 0.0
for k1, v1 in character_of.iteritems():
for k2, v2 in character_of.iteritems():
my_err = dotc(v1, v2) / self.nsym
if k2 == k1: my_err -= 1.0
err_otrace = max(err_otrace, abs(my_err))
print "Error in orthogonality relation of traces ", err
def dump_Fortran_sub(self, fh):
subname = "ptg_" + self.name.split()[0].strip()
fh.write("!{\src2tex{textfont=tt}}\n")
fh.write("!!****f* ABINIT/%s\n" % subname)
fh.write("!!\n")
fh.write("!! NAME\n")
fh.write("!! %s\n" % subname)
fh.write("!!\n")
fh.write("!! FUNCTION\n")
fh.write("!!\n")
fh.write("!! COPYRIGHT\n")
fh.write("!! Copyright (C) 2010-2021 ABINIT group (MG)\n")
fh.write("!! This file is distributed under the terms of the\n")
fh.write("!! GNU General Public License, see ~abinit/COPYING\n")
fh.write("!! or http://www.gnu.org/copyleft/gpl.txt .\n")
fh.write("!! For the initials of contributors, see ~abinit/doc/developers/contributors.txt .\n")
fh.write("!!\n")
fh.write("!! INPUTS\n")
fh.write("!!\n")
fh.write("!! OUTPUT\n")
fh.write("!!\n")
fh.write("!! PARENTS\n")
fh.write("!!\n")
fh.write("!! CHILDREN\n")
fh.write("!!\n")
fh.write("!! SOURCE\n")
fh.write("!!\n")
fh.write("!" + 80*"*" + "\n")
fh.write("! This include file has been automatically generated by the script " + os.path.basename(__file__) +"\n")
fh.write("! Do not edit! Change the script source instead.\n")
fh.write("!" + 80*"*" + "\n")
fh.write("\n")
fh.write("! Point group name " + self.name + "\n")
fh.write("\n")
fh.write("#if defined HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n")
fh.write("#include \"abi_common.h\"\n\n")
fh.write(" subroutine %s (nsym,nclass,sym,class_ids,class_names,Irr)\n" % subname )
# Disable optimization if ifc is used.
fh.write(" !DEC$ NOOPTIMIZE")
fh.write(" use defs_basis\n")
fh.write(" use m_profiling_abi\n")
fh.write(" use m_defs_ptgroups, only : irrep_t \n")
fh.write(" implicit none\n")
fh.write("!Arguments ------------------------------------\n")
fh.write(" integer,intent(out) :: nclass,nsym \n")
#fh.write(" character(len=5),intent(in) :: ptg_name \n")
fh.write(" !arrays\n")
fh.write(" integer,allocatable,intent(out) :: sym(:,:,:), class_ids(:,:)\n")
fh.write(" character(len=5),allocatable,intent(out) :: class_names(:)\n")
fh.write(" type(irrep_t),allocatable,intent(out) :: Irr(:)\n")
#fh.write(" integer,pointer :: sym(:,:,:), class_ids(:,:)\n")
#fh.write(" character(len=5),pointer :: class_names(:)\n")
#fh.write(" type(irrep_t),pointer :: Irr(:)\n")
fh.write(" !Local variables-------------------------------\n")
fh.write(" complex(dpc) :: j=(0.0_dp,1.0_dp) \n")
#fh.write(" character(len=500) :: msg \n")
fh.write(" ! " + 80*"*" + "\n")
# Write list of Symmetries first.
fh.write("! List of symmetries packed in classes\n")
nsym = self.nsym
fh.write(" nsym = %s\n" % nsym)
fh.write(" ABI_MALLOC(sym, (3,3,nsym))\n")
isym=0
for sym in self:
isym += 1
varname = "sym(:,:,%s)" % isym
fh.write(sym.toFortran(varname)+"\n")
fh.write("\n")
# Write classes and their names.
fh.write("! Number of classes and corresponding indeces\n")
nclass = self.nclass
fh.write(" nclass = %s\n" % nclass)
fh.write(" ABI_MALLOC(class_ids, (2,nclass))\n")
for iclas in range(self.nclass):
first = self.class_ids[iclas][0] +1 # From C to Fortran
last = self.class_ids[iclas][-1] +1
idx = iclas + 1
fh.write(" class_ids(1,%(idx)s) = %(first)s\n" % locals())
fh.write(" class_ids(2,%(idx)s) = %(last)s\n" % locals())
# Write the name of each class to be reported in the table.
fh.write("\n")
fh.write("ABI_MALLOC(class_names,(%(nclass)s))\n" % locals())
idx = 0
for iclas in range(nclass):
idx += 1
name = self.class_names[iclas]
if name: name = name.lstrip().rstrip()
fh.write(" class_names(%(idx)s) = \"%(name)s\" \n" % locals() )
# Write the irreducible representations.
fh.write("\n")
fh.write("! List of irreducible representations.\n")
#fh.write(" nirrepr = %s\n" % self.nirrepr)
fh.write(" ABI_MALLOC(Irr, (%(nclass)s))\n" % locals())
idx = 0
for irrepr in self.irreprs:
idx += 1
dim = irrepr.dim
name = irrepr.name.lstrip().rstrip()
fh.write(" Irr(%(idx)s)%%name = \"%(name)s\"\n" % locals())
fh.write(" Irr(%(idx)s)%%dim = %(dim)s\n" % locals())
fh.write(" Irr(%(idx)s)%%nsym = %(nsym)s\n" % locals())
fh.write(" ABI_MALLOC(Irr(%(idx)s)%%mat, (%(dim)s,%(dim)s,%(nsym)s))\n" % locals())
irp = 0
for mat in irrepr.matrices:
fmat = npymat2Fmat(mat)
irp += 1
fh.write(" Irr(%(idx)s)%%mat(:,:,%(irp)s) = %(fmat)s\n" % locals())
fh.write("\n")
fh.write(" RETURN\n ")
fh.write(" if (.FALSE.) write(std_out,*) j\n")
fh.write(" end subroutine %s \n" % subname)
fh.write("!!***\n")
return None
def to_dict(self):
d = {}
#subname = "ptg_" + self.name.split()[0].strip()
#fh.write("! Point group name " + self.name + "\n")
# List of symmetries packed in classes
d["rotations"] = [o.rotation.tolist() for o in self]
# Write classes and their names.
#fh.write("! Number of classes and corresponding indeces\n")
d["nclass"] = self.nclass
#fh.write(" allocate(class_ids(2,nclass))\n")
class_range = []
for iclas in range(self.nclass):
first = self.class_ids[iclas][0] #+1 # From C to Fortran
last = self.class_ids[iclas][-1] #+1
#fh.write(" class_ids(1,%(idx)s) = %(first)s\n" % locals())
#fh.write(" class_ids(2,%(idx)s) = %(last)s\n" % locals())
class_range.append((first, last+1))
assert last+1 == self.nsym
d["class_range"] = class_range
# Write the name of each class to be reported in the table.
#fh.write("\n")
#fh.write(" allocate(class_names(%(nclass)s))\n" % locals())
#for iclas in range(self.nclass):
# name = self.class_names[iclas]
# if name: name = name.lstrip().rstrip()
# fh.write(" class_names(%(idx)s) = \"%(name)s\" \n" % locals() )
d["class_names"] = [name.lstrip().rstrip() for name in self.class_names]
# Write the irreducible representations.
#fh.write("! List of irreducible representations.\n")
#fh.write(" nirrepr = %s\n" % self.nirrepr)
irreps = {}
for irrepr in self.irreprs:
name = irrepr.name.lstrip().rstrip()
dim = irrepr.dim
#fh.write(" Irr(%(idx)s)%%name = \"%(name)s\"\n" % locals())
#fh.write(" Irr(%(idx)s)%%dim = %(dim)s\n" % locals())
#fh.write(" Irr(%(idx)s)%%nsym = %(nsym)s\n" % locals())
#fh.write(" allocate(Irr(%(idx)s)%%mat(%(dim)s,%(dim)s,%(nsym)s))\n" % locals())
#for mat in irrepr.matrices:
#fmat = npymat2Fmat(mat)
#fh.write(" Irr(%(idx)s)%%mat(:,:,%(irp)s) = %(fmat)s\n" % locals())
assert name not in irreps
irreps[name] = {"dim": dim, "matrices": [mat.tolist() for mat in irrepr.matrices]}
d["irreps"] = irreps
return d
#########################################################################################
class PtGroupParserException(Exception):
pass
class PtGroupParser(object):
def __init__(self):
self.text = list()
def parse_sym_field(self):
"""Parse the section with the symmetry operations. Returns the list of symmetries found"""
symmetries = list()
#2 : -x,-y,z => 2+ [[ 0 0 1 ]]
re_symheader = re.compile("(\d+)\s?:\s+(.+)=>\s+([-]?[\d\w][+-]?)(.*)")
for line in self.sym_field:
m = re_symheader.match(line)
if m:
#print "match ", line
idx = m.group(1)
trcoords = m.group(2).rstrip()
order = m.group(3)
strversor = m.group(4)
if not strversor: strversor = "[ 0 0 0 ]"
strversor = strversor.lstrip().rstrip()
strversor = strversor.replace("[ ","[")
strversor = strversor.replace(" ] ","]")
#print "idx", idx, "trcoords", trcoords , "order", order, "versor", strversor, "\n"
strmat = ""
row = 0
else:
if row in (0,1): strmat += line + ","
if row == 2: strmat += line
row += 1
if row == 3:
#print strmat
#matrix = strmat.replace(".",".,")
matrix = strmat.replace(".",",")
matrix = eval(matrix)
versor = strversor.replace(" ",",")
versor = eval(versor)
#print matrix, versor
# Instantiate the symmetry and append it to symmetries.
rot = Rotation(matrix, order, trcoords, versor)
symmetries.append(rot)
return symmetries
def parse_irrepr_field(self):
irrow = list()
for name, dim, text in zip(self.irrepr_names, self.irrepr_dims, self.irrepr_fields):
# Normalize input such that each string is of the form "1 : matrix"
newtext = list()
sbuf = None
for line in text:
if ":" in line:
if sbuf: newtext.append(sbuf)
sbuf = line + " "
else:
sbuf += line
newtext.append(sbuf)
matrices = [ None for x in range(self.nsym) ]
for line in newtext:
#print name, line, self.nsym
tokens = line.split(":")
idx = int(tokens[0]) -1
val = tokens[1]
if dim != 1: # Convert string to python list. #FIXME this is not safe!
#print line
#print "before eval: ",val
val = val.replace("] ","], ")
if "j" not in val:
val = val.replace(". ","., ")
else:
#print "before eval: ",val
val = val.replace("j ","j, ") # Dirty but it works
#print "after eval : ", val
val = eval(val)
#print idx, val
matrices[idx] = N.matrix(val)
#print matrices[idx]
irepr = IrreducibleRepr(name, dim, matrices)
#print irepr
irrow.append(irepr)
return irrow
def parse(self, fname):
# Read data from file and remove blank lines
lines = open(fname,"r").readlines()
self.text = [ line for line in lines if line.lstrip() ]
# Extract the group name from the first string.
title = self.text.pop(0)
substr = "Symmetry operations for the point group"
if substr not in title:
raise PtGroupParser("Read wrong title " + title)
self.pgroup_name = title.replace(substr, "",1).rstrip()
# Separate the symmetry field from the one with the irred. representations.
# 1) Consume text until we reach the files with the irreducible repr.
self.sym_field = []
substr = "Irreducible representations for the point group"
line = self.text.pop(0)
while substr not in line:
if line.lstrip(): self.sym_field.append(line.rstrip())
line = self.text.pop(0)
# Extract the group name from the string
#check = line.replace(substr, "",1).rstrip()
#if check != self.pgroup_name :
# raise ValueError, "Wrong format" + self.pgroup_name + "!=" + check
symmetries = self.parse_sym_field()
self.nsym = len(symmetries)
self.irrepr_fields= list()
self.irrepr_names = list()
self.irrepr_dims = list()
self.nirrepr = 0
# Now extract the Irreducible representations.
# Each representation is signaled by a line of the form
#Irrep A ( dimension 1 )
re_irrep = re.compile("\s*Irrep(.*)\(\s+dimension\s+(\d)\s+\)")
#line = self.text(0)
#nlines = len(self.text)
for line in self.text:
m = re_irrep.match(line)
#print line
if m:
if self.nirrepr: self.irrepr_fields.append(newirr)
self.nirrepr += 1
irr_name = m.group(1)
irr_dim = int(m.group(2))
#print line, irr_name, irr_dim
self.irrepr_names.append(irr_name)
self.irrepr_dims.append(irr_dim)
newirr = list()
else:
newirr.append(line.rstrip())
self.irrepr_fields.append(newirr) # Save the last irred repr.
irreprs = self.parse_irrepr_field()
#for irr in irreprs: print irr
#print "remaining",self.text
xx = PointGroup(symmetries, self.pgroup_name, irreprs)
#xx.show_table()
#xx.dump_Fortran_inc(sys.stdout)
return xx
#########################################################################################
class HTMLStripper(sgmllib.SGMLParser):
"""Simple HTML parser that removes all HTML tags"""
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def handle_data(self, data):
self.plain_text.append(data)
def parse(self, input):
"""Remove HTML tags from input. Return string."""
self.plain_text = list()
for item in input: self.feed(item)
return "".join(self.plain_text)
def download_ptgroup_tables(ptgnames=None, stripHTML=False):
"""Download point group tables from the Bilbao server.
Return the list of filenames containing the tables
"""
import telnetlib
HOST = "www.cryst.ehu.es"
if stripHTML:
html_parser = HTMLStripper()
fnames = list()
ii=-1
for pg in PTG_NUMS:
ii += 1
doublename = ptg_names[ii]
if ptgnames and doublename[0] not in ptgnames:
continue
print "Downloading table for point group: " + str(pg) + str(doublename) + "..."
tn = telnetlib.Telnet(HOST, 80)
command = "GET /cgi-bin/rep/programs/sam/point.py?sg=" + str(pg) + "&what=irreps\n"
tn.write(command)
table = tn.read_all()
fname = "ptgroup_" + doublename[0] + ".html"
if stripHTML:
table = html_parser.parse(table)
print table
fname = "ptgroup_" + doublename[0] + ".txt"
# Write table on file.
fh = open(fname,"w")
fh.write(table)
fh.close()
fnames.append(fname)
return fnames
def download_klgroup_table(ita_spgnum, basis, kcoords, label=None):
"""Download point group tables from the Bilbao server.
Return the list of filenames containing the tables
"""
import telnetlib
HOST = "www.cryst.ehu.es"
# To obtain the representations for a given k-vector and given space group in text form you have to give the command
# GET /cgi-bin/cryst/text/nph-repr?g=[gn]&b=[p|c|a]&x=[number]&y=[number]&z=[number]&l=[label]
# where
# [gn] is the group number in ITA,
# b corresponds to the basis in which the k-vector coordinates will be given.
# The choices are: p - primitive, c - centered dual, a - adjusted coefficients.
# x,y,z are the values for the three coordinates of the k-vector,
# l is the label for the k-vector.
# NOTE: By now the program uses the default choice for the group setting when there is more than one conventional setting.
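# Worked example of the command described above (values taken from the __main__
# block below, shown here only for illustration):
#   GET /cgi-bin/cryst/text/nph-repr?g=227&b=p&x=0.5&y=0.0&z=0.0&l=M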
#print "Downloading table for point group: " + str(pg) + str(doublename) + "..."
tn = telnetlib.Telnet(HOST, 80)
if label is None: label="None"
if basis not in ("p", "c", "a"): raise ValueError("basis must be 'p', 'c' or 'a'")
k1 = kcoords[0]
k2 = kcoords[1]
k3 = kcoords[2]
command = \
"GET /cgi-bin/cryst/text/nph-repr?g=%(ita_spgnum)s&b=%(basis)s&x=%(k1)s&y=%(k2)s&z=%(k3)s&l=%(label)s\n" % locals()
print "Executing command " + command
tn.write(command)
# Read table in a single string.
k_table = tn.read_all()
# Write table on file.
fname = "k_test"
fh = open(fname,"w")
fh.write(k_table)
fh.close()
return k_table.splitlines()
WIDTH = 45
def parse_klgroup_table(text_lines, fh=None):
WIDTH = 45
if fh is None:
write = sys.stdout.write
else:
write = fh.write
# Remove blank lines from input text.
text = [ line for line in text_lines if line.lstrip() ]
line = text[0]
while "Number of elements" not in line: line = text.pop(0)
nsym = int(line.split(":")[1])
#print "nsym= ", nsym
line = text.pop(0)
while "The k-vector coordinates relative to the standard dual basis are" not in line:
line = text.pop(0)
kdual = line.split(":")[1].split()
strk = ""
for kc in kdual: strk += str(kc) + " "
write(strk.ljust(WIDTH) + " # kdual\n")
line = text.pop(0)
search_str = "The little group of the k-vector has the following "
while search_str not in line:
line = text.pop(0)
nsym_ltgk = int(line.replace(search_str, ""))
write(str(nsym_ltgk).ljust(WIDTH) + " # Symmetries of the little group of k\n")
text.pop(0) # Remove next lines
# Read the rows of symmetries of the little group (C-ordering).
nfields = nsym_ltgk/5 + 1
for ifield in range(nfields):
sym_ids = [ int(idx) for idx in text.pop(0).split()] # Remove line with symmetry indices.
# Copy this field into sym_mats because we are going to read the matrices by columns
sym_mats = list()
for il in xrange(3): sym_mats.append(text.pop(0).lstrip())
# Extract the columns
nmats = 4
if ifield == (nfields-1) and (nsym_ltgk % 4 > 0):
nmats = nsym_ltgk % 4 # Last field might have nmats < 4
# Print the operations of the little group.
for ii in xrange(nmats):
cols = cut_cols(sym_mats, ncols=4)
print cols
eval_tnons = "" # Fractional translations (e.g. 3/4) have to be evaluated and converted to float
for tk in cols[3].split(): eval_tnons += str(eval("1.*"+tk)) + " "
print eval_tnons
write("".join([ c for c in cols[:3]]) + "\n")
write(eval_tnons + "\n")
#####################################
# Parse the section with the irreps #
#####################################
# Move to the beginning of the section.
search_str = "The little group of the k-vector has " #6 allowed irreps.
line = text.pop(0)
while search_str not in line: line = text.pop(0)
nirreps = int(line.replace(search_str, "").split()[0])
line = text.pop(0) # Remove comment: The matrices, corresponding to all of the little group elements are :
write(str(nirreps).ljust(WIDTH) + " # Number of irreps of the little group.\n")
# Extract the fields with the Irreducible representations.
# Each irrep starts with an header of the form: Irrep (M)(5) , dimension 2
re_irrep_header = re.compile("\s*Irrep \((\w+)\)\((\d+)\) , dimension (\d+)\s*")
irrep_name = list()
irrep_dim = list()
irrep_num = list()
irrep_field = list()
txt_buf = None
str_end = "The full-group irreps for the generators of the space group are :"
for line in text:
#print line
if str_end in line: break
m = re_irrep_header.match(line)
if m:
if txt_buf: irrep_field.append(txt_buf)
irrep_name.append (m.group(1))
irrep_num.append(int(m.group(2)))
irrep_dim.append(int(m.group(3)))
txt_buf = list()
else:
txt_buf.append(line)
irrep_field.append(txt_buf) # Add last field
assert nirreps == irrep_num[-1]
for irp in range(nirreps):
st = str(irrep_num[irp]) + " " + str(irrep_dim[irp]) + " " + str(irrep_name[irp])
write(st.ljust(WIDTH) + " # irrep_index, irrep_dim, irrep_name\n")
#for line in irrep_field[irp]: print line
irreps_k = parse_irrep_k(nsym_ltgk, irrep_dim[irp], irrep_field[irp])
for isym in range(nsym_ltgk):
mat_str = "".join(c for c in irreps_k[isym]).lstrip()
#mat_str = mat_str.replace(" ","")
#mat_str = mat_str.replace(")",") ")
write(str(isym+1).ljust(5) + mat_str + "\n")
def parse_irrep_k(nsym, dim, text):
"""This function parses text in the form
1 2
(1.000, 0.0) (0.000, 0.0) (1.000,120.0) (0.000, 0.0)
(0.000, 0.0) (1.000, 0.0) (0.000, 0.0) (1.000,240.0)
"""
irreps_k = [ None for isym in range(nsym) ]
while len(text)>0:
sym_ids = [ int(idx) for idx in text.pop(0).split()] # Read the indices of the symmetries
nsym_in_row = len(sym_ids)
#print sym_ids
rows = list() # Get the irreps of this symmetries
for irow in range(dim):
row = text.pop(0)
#print row
rows.append(row)
for isym in sym_ids: #range(nsym_in_row):
cols = cut_cols(rows, separator=")", ncols=dim)
#print " isym =", isym, "cols= " ,cols
irreps_k[isym-1] = cols
return irreps_k
def cut_cols(rows, separator=None, ncols=1):
columns = list()
for icol in xrange(ncols):
col = ""
for il in xrange(len(rows)):
line = rows[il]
#print line
tokens = line.split(separator, 1)
head = tokens[0]
if len(tokens) > 1:
tail= tokens[1]
else:
tail = "" # separator might not be in line.
rows[il] = tail
if not separator:
col += head + " "
else:
col += head + separator
columns.append(col)
return columns
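# Hedged example of the column cutter (not in the original file). Note that
# cut_cols consumes the strings in `rows` in place:
#   >>> cut_cols(["1 2 3", "4 5 6"], ncols=3)
#   ['1 4 ', '2 5 ', '3 6 ']
# With separator=")" each column keeps its trailing ")", so complex entries such
# as "(1.000, 0.0)" stay intact.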
def write_ltgroup_file(fname, ita_spgnum, basis, kpoints, labels):
"""Write file with the irreps of the little group that can be read by abinit."""
nkpts = len(kpoints)
fh = open(fname,"w")
write = fh.write
# Header (two rows)
fvers = 1
write("# Little group file generated by " + os.path.basename(__file__) + "\n")
write(str(fvers).ljust(WIDTH) + " # Version\n")
write(str(ita_spgnum).ljust(WIDTH) + " # ITA space group\n")
write(str(basis).ljust(WIDTH) + " # Basis\n")
write(str(nkpts).ljust(WIDTH) + " # Number of k-points in database\n")
# Write the complete list of k-points first
for kpt, kname in zip(kpoints, labels):
str_kname = ""
for kc in kpt: str_kname += str(kc) + " "
str_kname += kname
write(str_kname.ljust(WIDTH) + "\n")
for kpt, kname in zip(kpoints, labels):
k_table = download_klgroup_table(ita_spgnum, basis, kpt, kname)
parse_klgroup_table(k_table, fh)
fh.close()
return None
###############################################################################
###############################################################################
if __name__ == "__main__":
#if True:
if False:
# Little group tables
ita_spgnum = 227
basis = "p"
kcoords = [0.5, 0.0, 0.0]
label = "M"
#ktable_text = download_klgroup_table(ita_spgnum, basis, kcoords, label)
#parse_klgroup_table(ktable_text)
kpoints = [kcoords]
labels = [label]
lgroup_fname = "lgroup_" + str(ita_spgnum)
write_ltgroup_file(lgroup_fname, ita_spgnum, basis, kpoints, labels)
sys.exit(2)
if False:
fnames = download_ptgroup_tables(["C2v"], stripHTML=True)
sys.exit(2)
p = PtGroupParser()
#fcheck = "pgroup_T"
#fcheck = "pgroup_O"
fcheck = ["pgroup_C1", "pgroup_Ci"]
#fcheck = None
#for ii, pg in enumerate(PTG_NUMS):
# doublename = ptg_names[ii]
##
# fname = "ptgroup_" + doublename[0] + ".txt"
# fname_new = "ptg_" + doublename[0] + ".txt"
# print "renaming " + fname + "into" + fname_new
# os.rename(fname, fname_new)
#print "done"
#sys.exit(0)
dirname = "./ptgroup_data/"
#ttt = open("tmp","w")
all_irreps = {}
for ii, pg in enumerate(PTG_NUMS):
doublename = ptg_names[ii]
fname = dirname + "ptg_" + doublename[0] + ".txt"
#if fcheck and fname not in fcheck: continue
#print "analysing " + str(doublename)
pgroup = p.parse(fname)
#pgroup.check()
#pgroup.show_character_table()
#print pgroup._mk_classes()
#if True or doublename[0] == "C2":
key = doublename[0]
assert key not in all_irreps
d = pgroup.to_dict()
#pprint(d)
all_irreps[key] = d
#import json
#s = json.dumps(d)
#print(s)
# >>>> Write Fortran routines <<<<
#finc_name = "ptg_" + doublename[0] + ".F90"
#print "Writing Fortran file: " + finc_name
#fh = open(finc_name,"w")
#fh = sys.stdout
#pgroup.dump_Fortran_sub(fh)
#fh.close()  # The Fortran file handle above is commented out, so there is nothing to close here.
#ttt.write(" CASE ('" + doublename[1] + "')\n")
#subname = "ptg_" + doublename[0].strip()
#ttt.write(" call %s (ptg_name,nsym,nclass,sym,class_ids,class_names,Irr)\n" % subname )
pprint(all_irreps)
|
abinit/abinit
|
src/43_ptgroups/ptg.py
|
Python
|
gpl-3.0
| 42,905
|
[
"ABINIT"
] |
8e220095a13cef14298c20e334a3befd8a865c81bc4d429059b7786d5fcaf9ec
|
#!/usr/bin/env python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Main Madpack installation executable.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import getpass
import re
import os
import glob
import traceback
import subprocess
import datetime
import tempfile
import shutil
import unittest
from upgrade_util import ChangeHandler
from upgrade_util import ViewDependency
from upgrade_util import TableDependency
from upgrade_util import ScriptCleaner
from itertools import izip_longest
# Required Python version
py_min_ver = [2, 6]
# Check python version
if sys.version_info[:2] < py_min_ver:
print("ERROR: python version too old (%s). You need %s or greater." %
('.'.join(str(i) for i in sys.version_info[:3]), '.'.join(str(i) for i in py_min_ver)))
exit(1)
# Find MADlib root directory. This file is installed to
# $MADLIB_ROOT/madpack/madpack.py, so to get $MADLIB_ROOT we need to go
# two levels up in the directory hierarchy. We use (a) os.path.realpath and
# (b) __file__ (instead of sys.argv[0]) because madpack.py could be called
# (a) through a symbolic link and (b) not as the main module.
maddir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/..") # MADlib root dir
sys.path.append(maddir + "/madpack")
# Import MADlib python modules
import argparse
import configyml
# Some read-only variables
this = os.path.basename(sys.argv[0]) # name of this script
# Default directories
maddir_conf = maddir + "/config" # Config dir
maddir_lib = maddir + "/lib/libmadlib.so" # C/C++ libraries
# Read the config files
ports = configyml.get_ports(maddir_conf) # object made of Ports.yml
rev = configyml.get_version(maddir_conf) # MADlib OS-level version
portid_list = []
for port in ports:
portid_list.append(port)
SUPPORTED_PORTS = ('postgres', 'greenplum', 'hawq')
# Global variables
portid = None # Target port ID (eg: pg90, gp40)
dbconn = None # DB Connection object
dbver = None # DB version
con_args = {} # DB connection arguments
verbose = None # Verbose flag
keeplogs = None
tmpdir = None
is_hawq2 = False
def _make_dir(dir):
"""
# Create a temp dir
# @param dir temp directory path
"""
if not os.path.isdir(dir):
try:
os.makedirs(dir)
except:
print "ERROR: can not create directory: %s. Check permissions." % dir
exit(1)
# ------------------------------------------------------------------------------
def _error(msg, stop):
"""
Error message wrapper
@param msg error message
@param stop program exit flag
"""
# Print to stdout
print this + ' : ERROR : ' + msg
# stack trace is not printed
if stop:
exit(2)
# ------------------------------------------------------------------------------
def _info(msg, verbose=True):
"""
Info message wrapper (verbose)
@param msg info message
@param verbose prints only if True
"""
# Print to stdout
if verbose:
print this + ' : INFO : ' + msg
# ------------------------------------------------------------------------------
def run_query(sql, show_error, con_args=con_args):
# Define sqlcmd
sqlcmd = 'psql'
delimiter = ' <$madlib_delimiter$> '
# Test the DB cmd line utility
std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if std == '':
_error("Command not found: %s" % sqlcmd, True)
# Run the query
runcmd = [sqlcmd,
'-h', con_args['host'].split(':')[0],
'-p', con_args['host'].split(':')[1],
'-d', con_args['database'],
'-U', con_args['user'],
'-F', delimiter,
'--no-password',
'--no-psqlrc',
'--no-align',
'-c', sql]
runenv = os.environ
if 'password' in con_args:
runenv["PGPASSWORD"] = con_args['password']
runenv["PGOPTIONS"] = '-c search_path=public -c client_min_messages=error'
std, err = subprocess.Popen(runcmd, env=runenv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if err:
if show_error:
_error("SQL command failed: \nSQL: %s \n%s" % (sql, err), False)
if 'password' in err:
raise EnvironmentError
else:
raise Exception
# Convert the delimited output into a dictionary
results = [] # list of rows
i = 0
for line in std.splitlines():
if i == 0:
cols = [name for name in line.split(delimiter)]
else:
row = {} # dict of col_name:col_value pairs
c = 0
for val in line.split(delimiter):
row[cols[c]] = val
c += 1
results.insert(i, row)
i += 1
# Drop the last line: "(X rows)"
try:
results.pop()
except:
pass
return results
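# Sketch of the returned structure (connection settings assumed, illustrative
# only): each row of the psql output becomes a dict keyed by column name, e.g.
# run_query("SELECT 1 AS one, 2 AS two", True) would yield
# [{'one': '1', 'two': '2'}] -- all values arrive as strings because they are
# parsed from the delimited text output of psql.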
# ------------------------------------------------------------------------------
def _internal_run_query(sql, show_error):
"""
Runs a SQL query on the target platform DB
using the default command-line utility.
Very limited:
- no text output with "new line" characters allowed
@param sql query text to execute
@param show_error displays the SQL error msg
"""
return run_query(sql, show_error, con_args)
# ------------------------------------------------------------------------------
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB and HAWQ installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port not in ('greenplum', 'hawq'):
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
link_name = 'greenplum-db' if port == 'greenplum' else 'hawq'
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, link_name))
if os.path.islink(rel_gphome) and os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir
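# Illustrative example (paths are assumptions, not from the original source):
# with port='greenplum', maddir='/usr/local/greenplum-db-4.3.5.0/madlib/Versions/1.9'
# and a symlink /usr/local/greenplum-db -> /usr/local/greenplum-db-4.3.5.0,
# the function returns '/usr/local/greenplum-db/madlib/Versions/1.9';
# otherwise the absolute maddir is returned unchanged.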
# ------------------------------------------------------------------------------
def _run_sql_file(schema, maddir_mod_py, module, sqlfile,
tmpfile, logfile, pre_sql, upgrade=False,
sc=None):
"""
Run SQL file
@param schema name of the target schema
@param maddir_mod_py name of the module dir with Python code
@param module name of the module
@param sqlfile name of the file to parse
@param tmpfile name of the temp file to run
@param logfile name of the log file (stdout)
@param pre_sql optional SQL to run before executing the file
@param upgrade are we upgrading as part of this sql run
@param sc object of ScriptCleaner
"""
# Check if the SQL file exists
if not os.path.isfile(sqlfile):
_error("Missing module SQL file (%s)" % sqlfile, False)
raise ValueError("Missing module SQL file (%s)" % sqlfile)
# Prepare the file using M4
try:
f = open(tmpfile, 'w')
# Add the before SQL
if pre_sql:
f.writelines([pre_sql, '\n\n'])
f.flush()
# Find the madpack dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/madpack"):
maddir_madpack = maddir + "/ports/" + portid + "/" + dbver + "/madpack"
else:
maddir_madpack = maddir + "/madpack"
maddir_ext_py = maddir + "/lib/python"
m4args = ['m4',
'-P',
'-DMADLIB_SCHEMA=' + schema,
'-DPLPYTHON_LIBDIR=' + maddir_mod_py,
'-DEXT_PYTHON_LIBDIR=' + maddir_ext_py,
'-DMODULE_PATHNAME=' + maddir_lib,
'-DMODULE_NAME=' + module,
'-I' + maddir_madpack,
sqlfile]
_info("> ... parsing: " + " ".join(m4args), verbose)
subprocess.call(m4args, stdout=f)
f.close()
except:
_error("Failed executing m4 on %s" % sqlfile, False)
raise Exception
# Only update function definition
sub_module = ''
if upgrade:
# get filename from complete path without the extension
sub_module = os.path.splitext(os.path.basename(sqlfile))[0]
_info(sub_module, False)
if sub_module not in sc.get_change_handler().newmodule:
sql = open(tmpfile).read()
sql = sc.cleanup(sql)
open(tmpfile, 'w').write(sql)
# Run the SQL using DB command-line utility
if portid in ('greenplum', 'postgres', 'hawq'):
sqlcmd = 'psql'
# Test the DB cmd line utility
std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if not std:
_error("Command not found: %s" % sqlcmd, True)
runcmd = [sqlcmd, '-a',
'-v', 'ON_ERROR_STOP=1',
'-h', con_args['host'].split(':')[0],
'-p', con_args['host'].split(':')[1],
'-d', con_args['database'],
'-U', con_args['user'],
'--no-password',
'-f', tmpfile]
runenv = os.environ
if 'password' in con_args:
runenv["PGPASSWORD"] = con_args['password']
runenv["PGOPTIONS"] = '-c client_min_messages=notice'
# Open log file
try:
log = open(logfile, 'w')
except:
_error("Cannot create log file: %s" % logfile, False)
raise Exception
# Run the SQL
try:
_info("> ... executing " + tmpfile, verbose)
retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
except:
_error("Failed executing %s" % tmpfile, False)
raise Exception
finally:
log.close()
return retval
# ------------------------------------------------------------------------------
def _get_madlib_dbrev(schema):
"""
Read MADlib version from database
@param schema MADlib schema name
"""
try:
row = _internal_run_query("SELECT count(*) AS cnt FROM pg_tables " +
"WHERE schemaname='" + schema + "' AND " +
"tablename='migrationhistory'", True)
if int(row[0]['cnt']) > 0:
row = _internal_run_query("""SELECT version FROM %s.migrationhistory
ORDER BY applied DESC LIMIT 1""" % schema, True)
if row:
return row[0]['version']
except:
_error("Failed reading MADlib db version", True)
return None
# ------------------------------------------------------------------------------
def _get_dbver():
""" Read version number from database (of form X.Y) """
try:
versionStr = _internal_run_query("SELECT pg_catalog.version()", True)[0]['version']
if portid == 'postgres':
match = re.search("PostgreSQL[a-zA-Z\s]*(\d+\.\d+)", versionStr)
elif portid == 'greenplum':
# for Greenplum the 3rd digit is necessary to differentiate
# 4.3.5+ from versions < 4.3.5
match = re.search("Greenplum[a-zA-Z\s]*(\d+\.\d+\.\d+)", versionStr)
elif portid == 'hawq':
match = re.search("HAWQ[a-zA-Z\s]*(\d+\.\d+)", versionStr)
return None if match is None else match.group(1)
except:
_error("Failed reading database version", True)
# ------------------------------------------------------------------------------
def _check_db_port(portid):
"""
Make sure we are connected to the expected DB platform
@param portid expected DB port id - to be validated
"""
# Postgres
try:
row = _internal_run_query("SELECT version() AS version", True)
except:
_error("Cannot validate DB platform type", True)
if row and row[0]['version'].lower().find(portid) >= 0:
if portid == 'postgres':
if row[0]['version'].lower().find('greenplum') < 0:
return True
elif portid == 'greenplum':
if row[0]['version'].lower().find('hawq') < 0:
return True
elif portid == 'hawq':
return True
return False
# ------------------------------------------------------------------------------
def _is_rev_gte(left, right):
""" Return if left >= right
Args:
@param left: list. Revision numbers in a list form (as returned by
_get_rev_num).
@param right: list. Revision numbers in a list form (as returned by
_get_rev_num).
Returns:
Boolean
If left and right are all numeric then regular list comparison occurs.
If either one contains a string, elements are compared pairwise for as long as both are ints.
The first list to contain a string is considered smaller
(including the case where the other list has no element at the corresponding index).
Examples:
[1, 9, 0] >= [1, 9, 0]
[1, 9, 1] >= [1, 9, 0]
[1, 9, 1] >= [1, 9]
[1, 10] >= [1, 9, 1]
[1, 9, 0] >= [1, 9, 0, 'dev']
[1, 9, 1] >= [1, 9, 0, 'dev']
[1, 9, 0] >= [1, 9, 'dev']
[1, 9, 'rc'] >= [1, 9, 'dev']
[1, 9, 'rc', 0] >= [1, 9, 'dev', 1]
[1, 9, 'rc', '1'] >= [1, 9, 'rc', '1']
"""
def all_numeric(l):
return not l or all(isinstance(i, int) for i in l)
if all_numeric(left) and all_numeric(right):
return left >= right
else:
for i, (l_e, r_e) in enumerate(izip_longest(left, right)):
if isinstance(l_e, int) and isinstance(r_e, int):
if l_e == r_e:
continue
else:
return l_e > r_e
elif isinstance(l_e, int) or isinstance(r_e, int):
# [1, 9, 0] > [1, 9, 'dev']
# [1, 9, 0] > [1, 9]
return isinstance(l_e, int)
else:
# both are not int
if r_e is None:
# [1, 9, 'dev'] < [1, 9]
return False
else:
return l_e is None or left[i:] >= right[i:]
return True
# ----------------------------------------------------------------------
def _get_rev_num(rev):
"""
Convert version string into number for comparison
@param rev version text
It is expected to follow Semantic Versioning (semver.org)
Valid inputs:
1.9.0, 1.10.0, 2.5.0
1.0.0-alpha, 1.0.0-alpha.1, 1.0.0-0.3.7, 1.0.0-x.7.z.92
1.0.0+20130313144700, 1.0.0-beta+exp.sha.5114f85
"""
try:
rev_parts = re.split('[-+_]', rev)
# get numeric part of the version string
num = [int(i) for i in rev_parts[0].split('.')]
num += [0] * (3 - len(num)) # normalize num to be of length 3
# get identifier part of the version string
if len(rev_parts) > 1:
num.extend(map(str, rev_parts[1:]))
if not num:
num = [0]
return num
except:
# invalid revision
return [0]
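# Hedged examples (illustrative, not part of the original script):
#   >>> _get_rev_num('1.10')
#   [1, 10, 0]
#   >>> _get_rev_num('1.9.0-alpha')
#   [1, 9, 0, 'alpha']
# The numeric part is padded to three components so that _is_rev_gte can
# compare versions of different lengths.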
# ------------------------------------------------------------------------------
def _print_revs(rev, dbrev, con_args, schema):
"""
Print version information
@param rev OS-level MADlib version
@param dbrev DB-level MADlib version
@param con_args database connection arguments
@param schema MADlib schema name
"""
_info("MADlib tools version = %s (%s)" % (str(rev), sys.argv[0]), True)
if con_args:
try:
_info("MADlib database version = %s (host=%s, db=%s, schema=%s)"
% (dbrev, con_args['host'], con_args['database'], schema), True)
except:
_info("MADlib database version = [Unknown] (host=%s, db=%s, schema=%s)"
% (dbrev, con_args['host'], con_args['database'], schema), True)
return
# ------------------------------------------------------------------------------
def _plpy_check(py_min_ver):
"""
Check pl/python existence and version
@param py_min_ver min Python version to run MADlib
"""
_info("Testing PL/Python environment...", True)
# Check PL/Python existence
rv = _internal_run_query("SELECT count(*) AS CNT FROM pg_language "
"WHERE lanname = 'plpythonu'", True)
if int(rv[0]['cnt']) > 0:
_info("> PL/Python already installed", verbose)
else:
_info("> PL/Python not installed", verbose)
_info("> Creating language PL/Python...", True)
try:
_internal_run_query("CREATE LANGUAGE plpythonu;", True)
except:
_error('Cannot create language plpythonu. Stopping installation...', False)
raise Exception
# Check PL/Python version
_internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
_internal_run_query("""
CREATE OR REPLACE FUNCTION plpy_version_for_madlib()
RETURNS TEXT AS
$$
import sys
# return '.'.join(str(item) for item in sys.version_info[:3])
return str(sys.version_info[:3]).replace(',','.').replace(' ','').replace(')','').replace('(','')
$$
LANGUAGE plpythonu;
""", True)
rv = _internal_run_query("SELECT plpy_version_for_madlib() AS ver;", True)
python = rv[0]['ver']
py_cur_ver = [int(i) for i in python.split('.')]
if py_cur_ver >= py_min_ver:
_info("> PL/Python version: %s" % python, verbose)
else:
_error("PL/Python version too old: %s. You need %s or greater"
% (python, '.'.join(str(i) for i in py_min_ver)), False)
raise Exception
_internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
_info("> PL/Python environment OK (version: %s)" % python, True)
# ------------------------------------------------------------------------------
def _db_install(schema, dbrev, testcase):
"""
Install MADlib
@param schema MADlib schema name
@param dbrev DB-level MADlib version
@param testcase command-line args for a subset of modules
"""
_info("Installing MADlib into %s schema..." % schema.upper(), True)
temp_schema = schema + '_v' + ''.join(map(str, _get_rev_num(dbrev)))
# Check the status of MADlib objects in database
madlib_exists = False if dbrev is None else True
# Test if schema is writable
try:
_internal_run_query("CREATE TABLE %s.__madlib_test_table (A INT);" % schema, False)
_internal_run_query("DROP TABLE %s.__madlib_test_table;" % schema, False)
schema_writable = True
except:
schema_writable = False
# CASE #1: Target schema exists with MADlib objects:
if schema_writable and madlib_exists:
# work-around before UDT is available in HAWQ
if portid == 'hawq':
_info("***************************************************************************", True)
_info("* Schema MADLIB already exists", True)
_info("* For HAWQ, MADlib objects will be overwritten to the 'MADLIB' schema", True)
_info("* It may drop any database objects (tables, views, etc.) that depend on 'MADLIB' SCHEMA!!!!!!!!!!!!!", True)
_info("***************************************************************************", True)
_info("Would you like to continue? [Y/N]", True)
go = raw_input('>>> ').upper()
while go not in ('Y', 'YES', 'N', 'NO'):
go = raw_input('Yes or No >>> ').upper()
if go in ('N', 'NO'):
_info('Installation stopped.', True)
return
# Rolling back in HAWQ will drop catalog functions. For exception, we
# simply push the exception to the caller to terminate the install
_db_create_objects(schema, None, testcase=testcase, hawq_debug=True)
else:
_info("***************************************************************************", True)
_info("* Schema %s already exists" % schema.upper(), True)
_info("* Installer will rename it to %s" % temp_schema.upper(), True)
_info("***************************************************************************", True)
_info("Would you like to continue? [Y/N]", True)
go = raw_input('>>> ').upper()
while go not in ('Y', 'YES', 'N', 'NO'):
go = raw_input('Yes or No >>> ').upper()
if go in ('N', 'NO'):
_info('Installation stopped.', True)
return
# Rename MADlib schema
_db_rename_schema(schema, temp_schema)
# Create MADlib schema
try:
_db_create_schema(schema)
except:
_db_rollback(schema, temp_schema)
# Create MADlib objects
try:
_db_create_objects(schema, temp_schema, testcase=testcase)
except:
_db_rollback(schema, temp_schema)
# CASE #2: Target schema exists w/o MADlib objects:
# For HAWQ, after the DB initialization, there is no
# madlib.migrationhistory table, thus madlib_exists is False
elif schema_writable and not madlib_exists:
# Create MADlib objects
try:
_db_create_objects(schema, None, testcase=testcase)
except:
_error("Building database objects failed. "
"Before retrying: drop %s schema OR install MADlib into "
"a different schema." % schema.upper(), True)
#
# CASE #3: Target schema does not exist:
#
elif not schema_writable:
if portid == 'hawq' and not is_hawq2:
# Rolling back in HAWQ will drop catalog functions. For exception, we
# simply push the exception to the caller to terminate the install
raise Exception("MADLIB schema is required for HAWQ")
_info("> Schema %s does not exist" % schema.upper(), verbose)
# Create MADlib schema
try:
_db_create_schema(schema)
except:
_db_rollback(schema, None)
# Create MADlib objects
try:
_db_create_objects(schema, None, testcase=testcase)
except:
_db_rollback(schema, None)
_info("MADlib %s installed successfully in %s schema." % (str(rev), schema.upper()), True)
# ------------------------------------------------------------------------------
def _db_upgrade(schema, dbrev):
"""
Upgrade MADlib
@param schema MADlib schema name
@param dbrev DB-level MADlib version
"""
if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
_info("Current MADlib version already up to date.", True)
return
if _is_rev_gte([1,8],_get_rev_num(dbrev)):
_error("""
MADlib versions prior to v1.9 are not supported for upgrade.
Please try upgrading to v1.9.1 and then upgrade to this version.
""", True)
return
_info("Upgrading MADlib into %s schema..." % schema.upper(), True)
_info("\tDetecting dependencies...", True)
_info("\tLoading change list...", True)
ch = ChangeHandler(schema, portid, con_args, maddir, dbrev, is_hawq2)
_info("\tDetecting table dependencies...", True)
td = TableDependency(schema, portid, con_args)
_info("\tDetecting view dependencies...", True)
vd = ViewDependency(schema, portid, con_args)
abort = False
if td.has_dependency():
_info("*" * 50, True)
_info("\tFollowing user tables/indexes are dependent on MADlib objects:", True)
_info(td.get_dependency_str(), True)
_info("*" * 50, True)
cd_udt = [udt for udt in td.get_depended_udt() if udt in ch.udt]
if len(cd_udt) > 0:
_error("""
User has objects dependent on following updated MADlib types!
{0}
These objects need to be dropped before upgrading.
""".format('\n\t\t\t'.join(cd_udt)), False)
# we add special handling for 'linregr_result'
if 'linregr_result' in cd_udt:
_info("""Dependency on 'linregr_result' could be due to objects
created from the output of the aggregate 'linregr'.
Please refer to the Linear Regression documentation
<http://madlib.incubator.apache.org/docs/latest/group__grp__linreg.html#warning>
for the recommended solution.
""", False)
abort = True
c_udoc = ch.get_udoc_oids()
d_udoc = td.get_depended_udoc_oids()
cd_udoc = [udoc for udoc in d_udoc if udoc in c_udoc]
if len(cd_udoc) > 0:
_error("""
User has objects dependent on the following updated MADlib operator classes!
oid={0}
These objects need to be dropped before upgrading.
""".format('\n\t\t\t'.join(cd_udoc)), False)
abort = True
if vd.has_dependency():
_info("*" * 50, True)
_info("\tFollowing user views are dependent on MADlib objects:", True)
_info(vd.get_dependency_graph_str(), True)
_info("*" * 50, True)
c_udf = ch.get_udf_signature()
d_udf = vd.get_depended_func_signature('UDF')
cd_udf = [udf for udf in d_udf if udf in c_udf]
if len(cd_udf) > 0:
_error("""
User has objects dependent on following updated MADlib functions!
{0}
These objects will fail to work with the updated functions and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_udf)), False)
abort = True
c_uda = ch.get_uda_signature()
d_uda = vd.get_depended_func_signature('UDA')
cd_uda = [uda for uda in d_uda if uda in c_uda]
if len(cd_uda) > 0:
_error("""
User has objects dependent on following updated MADlib aggregates!
{0}
These objects will fail to work with the new aggregates and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_uda)), False)
abort = True
c_udo = ch.get_udo_oids()
d_udo = vd.get_depended_opr_oids()
cd_udo = [udo for udo in d_udo if udo in c_udo]
if len(cd_udo) > 0:
_error("""
User has objects dependent on following updated MADlib operators!
oid={0}
These objects will fail to work with the new operators and
need to be dropped before starting upgrade again.
""".format('\n\t\t\t\t\t'.join(cd_udo)), False)
abort = True
if abort:
_error("""------- Upgrade aborted. -------
Backup and drop all objects that depend on MADlib before trying upgrade again.
Use madpack reinstall to automatically drop these objects only if appropriate.""", True)
else:
_info("No dependency problem found, continuing to upgrade ...", True)
_info("\tReading existing UDAs/UDTs...", False)
sc = ScriptCleaner(schema, portid, con_args, ch)
_info("Script Cleaner initialized ...", False)
ch.drop_changed_uda()
ch.drop_changed_udoc()
ch.drop_changed_udo()
ch.drop_changed_udc()
ch.drop_changed_udf()
ch.drop_changed_udt() # assume dependent udf for udt does not change
ch.drop_traininginfo_4dt() # used types: oid, text, integer, float
_db_create_objects(schema, None, True, sc)
_info("MADlib %s upgraded successfully in %s schema." % (str(rev), schema.upper()), True)
# ------------------------------------------------------------------------------
def _db_rename_schema(from_schema, to_schema):
"""
Rename schema
@param from_schema name of the schema to rename
@param to_schema new name for the schema
"""
_info("> Renaming schema %s to %s" % (from_schema.upper(), to_schema.upper()), True)
try:
_internal_run_query("ALTER SCHEMA %s RENAME TO %s;" % (from_schema, to_schema), True)
except:
_error('Cannot rename schema. Stopping installation...', False)
raise Exception
# ------------------------------------------------------------------------------
def _db_create_schema(schema):
"""
Create schema
    @param schema name of the schema to create
"""
_info("> Creating %s schema" % schema.upper(), True)
try:
_internal_run_query("CREATE SCHEMA %s;" % schema, True)
except:
_info('Cannot create new schema. Rolling back installation...', True)
pass
# ------------------------------------------------------------------------------
def _db_create_objects(schema, old_schema, upgrade=False, sc=None, testcase="",
hawq_debug=False):
"""
Create MADlib DB objects in the schema
@param schema Name of the target schema
@param sc ScriptCleaner object
@param testcase Command-line args for modules to install
@param hawq_debug
"""
if not upgrade and not hawq_debug:
# Create MigrationHistory table
try:
_info("> Creating %s.MigrationHistory table" % schema.upper(), True)
_internal_run_query("DROP TABLE IF EXISTS %s.migrationhistory;" % schema, True)
sql = """CREATE TABLE %s.migrationhistory
(id serial, version varchar(255),
applied timestamp default current_timestamp);""" % schema
_internal_run_query(sql, True)
except:
_error("Cannot crate MigrationHistory table", False)
raise Exception
# Copy MigrationHistory table for record keeping purposes
if old_schema:
try:
_info("> Saving data from %s.MigrationHistory table" % old_schema.upper(), True)
sql = """INSERT INTO %s.migrationhistory (version, applied)
SELECT version, applied FROM %s.migrationhistory
ORDER BY id;""" % (schema, old_schema)
_internal_run_query(sql, True)
except:
_error("Cannot copy MigrationHistory table", False)
raise Exception
# Stamp the DB installation
try:
_info("> Writing version info in MigrationHistory table", True)
_internal_run_query("INSERT INTO %s.migrationhistory(version) "
"VALUES('%s')" % (schema, str(rev)), True)
except:
_error("Cannot insert data into %s.migrationhistory table" % schema, False)
raise Exception
# Run migration SQLs
if upgrade:
_info("> Creating/Updating objects for modules:", True)
else:
_info("> Creating objects for modules:", True)
caseset = (set([test.strip() for test in testcase.split(',')])
if testcase != "" else set())
modset = {}
for case in caseset:
if case.find('/') > -1:
[mod, algo] = case.split('/')
if mod not in modset:
modset[mod] = []
if algo not in modset[mod]:
modset[mod].append(algo)
else:
modset[case] = []
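    # Illustrative note (added for clarity, not part of the original tool): a testcase
    # string such as "linalg,regress/linregr" would yield
    # modset = {'linalg': [], 'regress': ['linregr']}, i.e. whole modules map to empty
    # lists while module/algo pairs list only the requested algorithms.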
    # Loop through all modules
# portspecs is a global variable
for moduleinfo in portspecs['modules']:
# Get the module name
module = moduleinfo['name']
# Skip if doesn't meet specified modules
if modset is not None and len(modset) > 0 and module not in modset:
continue
_info("> - %s" % module, True)
# Find the Python module dir (platform specific or generic)
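        # (e.g. with portid='greenplum', dbver='5' and module='regress', the port-specific
        #  directory <maddir>/ports/greenplum/5/modules is preferred when it exists,
        #  otherwise the generic <maddir>/modules is used -- an illustrative example path)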
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
else:
maddir_mod_py = maddir + "/modules"
# Find the SQL module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
elif os.path.isdir(maddir + "/modules/" + module):
maddir_mod_sql = maddir + "/modules"
else:
# This was a platform-specific module, for which no default exists.
# We can just skip this module.
continue
# Make a temp dir for log files
cur_tmpdir = tmpdir + "/" + module
_make_dir(cur_tmpdir)
# Loop through all SQL files for this module
mask = maddir_mod_sql + '/' + module + '/*.sql_in'
sql_files = glob.glob(mask)
if not sql_files:
_error("No files found in: %s" % mask, True)
# Execute all SQL files for the module
for sqlfile in sql_files:
algoname = os.path.basename(sqlfile).split('.')[0]
            if portid == 'hawq' and not is_hawq2 and algoname in ('svec',):
continue
# run only algo specified
if module in modset and len(modset[module]) > 0 \
and algoname not in modset[module]:
continue
# Set file names
tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'
retval = _run_sql_file(schema, maddir_mod_py, module, sqlfile,
tmpfile, logfile, None, upgrade,
sc)
# Check the exit status
if retval != 0:
_error("TEST CASE RESULTed executing %s" % tmpfile, False)
_error("Check the log at %s" % logfile, False)
raise Exception
# ------------------------------------------------------------------------------
def _db_rollback(drop_schema, keep_schema):
"""
Rollback installation
@param drop_schema name of the schema to drop
@param keep_schema name of the schema to rename and keep
"""
_info("Rolling back the installation...", True)
if not drop_schema:
_error('No schema name to drop. Stopping rollback...', True)
# Drop the current schema
_info("> Dropping schema %s" % drop_schema.upper(), verbose)
try:
_internal_run_query("DROP SCHEMA %s CASCADE;" % (drop_schema), True)
except:
_error("Cannot drop schema %s. Stopping rollback..." % drop_schema.upper(), True)
# Rename old to current schema
if keep_schema:
_db_rename_schema(keep_schema, drop_schema)
_info("Rollback finished successfully.", True)
raise Exception
# ------------------------------------------------------------------------------
def unescape(string):
"""
Unescape separation characters in connection strings, i.e., remove first
backslash from "\/", "\@", "\:", and "\\".
"""
if string is None:
return None
else:
return re.sub(r'\\(?P<char>[/@:\\])', '\g<char>', string)
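# Illustrative example of the intended behaviour (added note, not from the original file):
#   unescape(r"p\@ss\/word") -> "p@ss/word"   # the escaping backslashes are removed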
# ------------------------------------------------------------------------------
def parseConnectionStr(connectionStr):
"""
@brief Parse connection strings of the form
<tt>[username[/password]@][hostname][:port][/database]</tt>
Separation characters (/@:) and the backslash (\) need to be escaped.
    @returns A tuple (username, password, hostname, port, database). Fields not
specified will be None.
"""
match = re.search(
r'((?P<user>([^/@:\\]|\\/|\\@|\\:|\\\\)+)' +
r'(/(?P<password>([^/@:\\]|\\/|\\@|\\:|\\\\)*))?@)?' +
r'(?P<host>([^/@:\\]|\\/|\\@|\\:|\\\\)+)?' +
r'(:(?P<port>[0-9]+))?' +
r'(/(?P<database>([^/@:\\]|\\/|\\@|\\:|\\\\)+))?', connectionStr)
return (
unescape(match.group('user')),
unescape(match.group('password')),
unescape(match.group('host')),
match.group('port'),
unescape(match.group('database')))
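# Illustrative example, based on the connection string used in the argparse epilog below:
#   parseConnectionStr("gpadmin@mdw:5432/testdb")
#   -> ('gpadmin', None, 'mdw', '5432', 'testdb')   # no password given, so it is None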
# ------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser(
prog="madpack",
description='MADlib package manager (' + str(rev) + ')',
argument_default=False,
formatter_class=argparse.RawTextHelpFormatter,
epilog="""Example:
$ madpack install -s madlib -p greenplum -c gpadmin@mdw:5432/testdb
This will install MADlib objects into a Greenplum database called TESTDB
running on server MDW:5432. Installer will try to login as GPADMIN
and will prompt for password. The target schema will be MADLIB.
""")
help_msg = """One of the following options:
install : run sql scripts to load into DB
upgrade : run sql scripts to upgrade
uninstall : run sql scripts to uninstall from DB
reinstall : performs uninstall and install
version : compare and print MADlib version (binaries vs database objects)
install-check : test all installed modules
(uninstall is currently unavailable for the HAWQ port)"""
choice_list = ['install', 'update', 'upgrade', 'uninstall',
'reinstall', 'version', 'install-check']
parser.add_argument('command', metavar='COMMAND', nargs=1,
choices=choice_list, help=help_msg)
parser.add_argument(
'-c', '--conn', metavar='CONNSTR', nargs=1, dest='connstr', default=None,
help="""Connection string of the following syntax:
[user[/password]@][host][:port][/database]
                 If not provided, default values will be derived for PostgreSQL and Greenplum:
- user: PGUSER or USER env variable or OS username
- pass: PGPASSWORD env variable or runtime prompt
- host: PGHOST env variable or 'localhost'
- port: PGPORT env variable or '5432'
- db: PGDATABASE env variable or OS username""")
parser.add_argument('-s', '--schema', nargs=1, dest='schema',
metavar='SCHEMA', default='madlib',
help="Target schema for the database objects.")
parser.add_argument('-p', '--platform', nargs=1, dest='platform',
metavar='PLATFORM', choices=portid_list,
help="Target database platform, current choices: " + str(portid_list))
parser.add_argument('-v', '--verbose', dest='verbose',
action="store_true", help="Verbose mode.")
parser.add_argument('-l', '--keeplogs', dest='keeplogs', default=False,
action="store_true", help="Do not remove installation log files.")
parser.add_argument('-d', '--tmpdir', dest='tmpdir', default='/tmp/',
help="Temporary directory location for installation log files.")
parser.add_argument('-t', '--testcase', dest='testcase', default="",
help="Module names to test, comma separated. Effective only for install-check.")
# Get the arguments
return parser.parse_args()
def main(argv):
args = parse_arguments()
global verbose
verbose = args.verbose
_info("Arguments: " + str(args), verbose)
global keeplogs
keeplogs = args.keeplogs
global tmpdir
try:
tmpdir = tempfile.mkdtemp('', 'madlib.', args.tmpdir)
except OSError, e:
tmpdir = e.filename
_error("cannot create temporary directory: '%s'." % tmpdir, True)
# Parse SCHEMA
if len(args.schema[0]) > 1:
schema = args.schema[0].lower()
else:
schema = args.schema.lower()
# Parse DB Platform (== PortID) and compare with Ports.yml
global portid
if args.platform:
try:
# Get the DB platform name == DB port id
portid = args.platform[0].lower()
ports[portid]
except:
portid = None
_error("Can not find specs for port %s" % (args.platform[0]), True)
else:
portid = None
# Parse CONNSTR (only if PLATFORM and DBAPI2 are defined)
if portid:
connStr = "" if args.connstr is None else args.connstr[0]
(c_user, c_pass, c_host, c_port, c_db) = parseConnectionStr(connStr)
# Find the default values for PG and GP
if portid in SUPPORTED_PORTS:
if c_user is None:
c_user = os.environ.get('PGUSER', getpass.getuser())
if c_pass is None:
c_pass = os.environ.get('PGPASSWORD', None)
if c_host is None:
c_host = os.environ.get('PGHOST', 'localhost')
if c_port is None:
c_port = os.environ.get('PGPORT', '5432')
if c_db is None:
c_db = os.environ.get('PGDATABASE', c_user)
# Set connection variables
global con_args
con_args['host'] = c_host + ':' + c_port
con_args['database'] = c_db
con_args['user'] = c_user
if c_pass is not None:
con_args['password'] = c_pass
# Try connecting to the database
_info("Testing database connection...", verbose)
try:
# check for password only if required
_internal_run_query("SELECT 1", False)
except EnvironmentError:
con_args['password'] = getpass.getpass("Password for user %s: " % c_user)
_internal_run_query("SELECT 1", False)
except:
_error('Failed to connect to database', True)
# Get DB version
global dbver
dbver = _get_dbver()
global is_hawq2
if portid == "hawq" and _is_rev_gte(_get_rev_num(dbver), _get_rev_num('2.0')):
is_hawq2 = True
else:
is_hawq2 = False
# HAWQ < 2.0 has hard-coded schema name 'madlib'
if portid == 'hawq' and not is_hawq2 and schema.lower() != 'madlib':
_error("*** Installation is currently restricted only to 'madlib' schema ***", True)
# update maddir to use a relative path if available
global maddir
maddir = _get_relative_maddir(maddir, portid)
# Get MADlib version in DB
dbrev = _get_madlib_dbrev(schema)
portdir = os.path.join(maddir, "ports", portid)
supportedVersions = [dirItem for dirItem in os.listdir(portdir)
if os.path.isdir(os.path.join(portdir, dirItem)) and
re.match("^\d+", dirItem)]
if dbver is None:
dbver = ".".join(
map(str, max([versionStr.split('.')
for versionStr in supportedVersions])))
_info("Could not parse version string reported by {DBMS}. Will "
"default to newest supported version of {DBMS} "
"({version}).".format(DBMS=ports[portid]['name'],
version=dbver), True)
else:
_info("Detected %s version %s." % (ports[portid]['name'], dbver),
True)
if portid == "hawq":
# HAWQ (starting 2.0) and GPDB (starting 5.0) uses semantic versioning,
# which implies all HAWQ 2.x or GPDB 5.x versions will have binary
            # compatibility. Hence, we can keep a single folder for all 2.X / 5.X.
if (_is_rev_gte(_get_rev_num(dbver), _get_rev_num('2.0')) and
not _is_rev_gte(_get_rev_num(dbver), _get_rev_num('3.0'))):
is_hawq2 = True
dbver = '2'
elif portid == 'greenplum':
# similar to HAWQ above, collapse all 5.X versions
if (_is_rev_gte(_get_rev_num(dbver), _get_rev_num('5.0')) and
not _is_rev_gte(_get_rev_num(dbver), _get_rev_num('6.0'))):
dbver = '5'
# Due to the ABI incompatibility between 4.3.4 and 4.3.5,
# MADlib treats 4.3.5+ as DB version 4.3ORCA which is different
# from 4.3. The name is suffixed with ORCA since optimizer (ORCA) is
# 'on' by default in 4.3.5
elif _is_rev_gte(_get_rev_num(dbver), _get_rev_num('4.3.4')):
dbver = '4.3ORCA'
else:
# only need the first two digits for <= 4.3.4
dbver = '.'.join(dbver.split('.')[:2])
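        # Summarising the mapping above (added for clarity): HAWQ 2.x collapses to dbver '2',
        # Greenplum 5.x to '5', Greenplum 4.3.4 and later 4.3.x releases to '4.3ORCA', and
        # older Greenplum releases keep only their first two version digits (e.g. '4.3').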
if not os.path.isdir(os.path.join(portdir, dbver)):
_error("This version is not among the %s versions for which "
"MADlib support files have been installed (%s)." %
(ports[portid]['name'], ", ".join(supportedVersions)), True)
# Validate that db platform is correct
if not _check_db_port(portid):
_error("Invalid database platform specified.", True)
# Adjust MADlib directories for this port (if they exist)
global maddir_conf
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/config"):
maddir_conf = maddir + "/ports/" + portid + "/" + dbver + "/config"
else:
maddir_conf = maddir + "/config"
global maddir_lib
if os.path.isfile(maddir + "/ports/" + portid + "/" + dbver +
"/lib/libmadlib.so"):
maddir_lib = maddir + "/ports/" + portid + "/" + dbver + \
"/lib/libmadlib.so"
else:
maddir_lib = maddir + "/lib/libmadlib.so"
# Get the list of modules for this port
global portspecs
portspecs = configyml.get_modules(maddir_conf)
else:
con_args = None
dbrev = None
# Parse COMMAND argument and compare with Ports.yml
# Debugging...
# print "OS rev: " + str(rev) + " > " + str(_get_rev_num(rev))
# print "DB rev: " + str(dbrev) + " > " + str(_get_rev_num(dbrev))
# Make sure we have the necessary parameters to continue
if args.command[0] != 'version':
if not portid:
_error("Missing -p/--platform parameter.", True)
if not con_args:
_error("Unknown problem with database connection string: %s" % con_args, True)
# COMMAND: version
if args.command[0] == 'version':
_print_revs(rev, dbrev, con_args, schema)
# COMMAND: uninstall/reinstall
if args.command[0] in ('uninstall',) and (portid == 'hawq' and not is_hawq2):
_error("madpack uninstall is currently not available for HAWQ", True)
if args.command[0] in ('uninstall', 'reinstall') and (portid != 'hawq' or is_hawq2):
if _get_rev_num(dbrev) == [0]:
_info("Nothing to uninstall. No version found in schema %s." % schema.upper(), True)
return
# Find any potential data to lose
affected_objects = _internal_run_query("""
SELECT
n1.nspname AS schema,
relname AS relation,
attname AS column,
typname AS type
FROM
pg_attribute a,
pg_class c,
pg_type t,
pg_namespace n,
pg_namespace n1
WHERE
n.nspname = '%s'
AND t.typnamespace = n.oid
AND a.atttypid = t.oid
AND c.oid = a.attrelid
AND c.relnamespace = n1.oid
AND c.relkind = 'r'
ORDER BY
n1.nspname, relname, attname, typname""" % schema.lower(), True)
_info("*** Uninstalling MADlib ***", True)
_info("***********************************************************************************", True)
_info("* Schema %s and all database objects depending on it will be dropped!" % schema.upper(), True)
if affected_objects:
_info("* If you continue the following data will be lost (schema : table.column : type):", True)
for ao in affected_objects:
_info('* - ' + ao['schema'] + ' : ' + ao['relation'] + '.' +
ao['column'] + ' : ' + ao['type'], True)
_info("***********************************************************************************", True)
_info("Would you like to continue? [Y/N]", True)
go = raw_input('>>> ').upper()
while go != 'Y' and go != 'N':
go = raw_input('Yes or No >>> ').upper()
# 2) Do the uninstall/drop
if go == 'N':
_info('No problem. Nothing dropped.', True)
return
elif go == 'Y':
_info("> dropping schema %s" % schema.upper(), verbose)
try:
_internal_run_query("DROP SCHEMA %s CASCADE;" % (schema), True)
except:
_error("Cannot drop schema %s." % schema.upper(), True)
_info('Schema %s (and all dependent objects) has been dropped.' % schema.upper(), True)
_info('MADlib uninstalled successfully.', True)
else:
return
# COMMAND: install/reinstall
if args.command[0] in ('install', 'reinstall'):
# Refresh MADlib version in DB, None for GP/PG
if args.command[0] == 'reinstall':
print "Setting MADlib database version to be None for reinstall"
dbrev = None
_info("*** Installing MADlib ***", True)
# 1) Compare OS and DB versions.
# noop if OS <= DB.
_print_revs(rev, dbrev, con_args, schema)
if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
_info("Current MADlib version already up to date.", True)
return
# proceed to create objects if nothing installed in DB or for HAWQ < 2.0
elif dbrev is None or (portid == 'hawq' and not is_hawq2):
pass
# error and refer to upgrade if OS > DB
else:
_error("""Aborting installation: existing MADlib version detected in {0} schema
To upgrade the {0} schema to MADlib v{1} please run the following command:
madpack upgrade -s {0} -p {2} [-c ...]
""".format(schema, rev, portid), True)
# 2) Run installation
try:
_plpy_check(py_min_ver)
_db_install(schema, dbrev, args.testcase)
except:
_error("MADlib installation failed.", True)
# COMMAND: upgrade
if args.command[0] in ('upgrade', 'update'):
_info("*** Upgrading MADlib ***", True)
dbrev = _get_madlib_dbrev(schema)
# 1) Check DB version. If None, nothing to upgrade.
if not dbrev:
_info("MADlib is not installed in {schema} schema and there "
"is nothing to upgrade. Please use install "
"instead.".format(schema=schema.upper()),
True)
return
# 2) Compare OS and DB versions. Continue if OS > DB.
_print_revs(rev, dbrev, con_args, schema)
if _is_rev_gte(_get_rev_num(dbrev), _get_rev_num(rev)):
_info("Current MADlib version is already up-to-date.", True)
return
if float('.'.join(dbrev.split('.')[0:2])) < 1.0:
_info("The version gap is too large, upgrade is supported only for "
"packages greater than or equal to v1.0.", True)
return
# 3) Run upgrade
try:
_plpy_check(py_min_ver)
_db_upgrade(schema, dbrev)
except Exception as e:
# Uncomment the following lines when debugging
print "Exception: " + str(e)
print sys.exc_info()
traceback.print_tb(sys.exc_info()[2])
_error("MADlib upgrade failed.", True)
# COMMAND: install-check
if args.command[0] == 'install-check':
# 1) Compare OS and DB versions. Continue if OS = DB.
if _get_rev_num(dbrev) != _get_rev_num(rev):
_print_revs(rev, dbrev, con_args, schema)
_info("Versions do not match. Install-check stopped.", True)
return
# Create install-check user
test_user = ('madlib_' +
rev.replace('.', '').replace('-', '_') +
'_installcheck')
try:
_internal_run_query("DROP USER IF EXISTS %s;" % (test_user), False)
except:
_internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
_internal_run_query("DROP USER IF EXISTS %s;" % (test_user), True)
_internal_run_query("CREATE USER %s;" % (test_user), True)
_internal_run_query("GRANT USAGE ON SCHEMA %s TO %s;" % (schema, test_user), True)
# 2) Run test SQLs
_info("> Running test scripts for:", verbose)
caseset = (set([test.strip() for test in args.testcase.split(',')])
if args.testcase != "" else set())
modset = {}
for case in caseset:
if case.find('/') > -1:
[mod, algo] = case.split('/')
if mod not in modset:
modset[mod] = []
if algo not in modset[mod]:
modset[mod].append(algo)
else:
modset[case] = []
# Loop through all modules
for moduleinfo in portspecs['modules']:
# Get module name
module = moduleinfo['name']
# Skip if doesn't meet specified modules
if modset is not None and len(modset) > 0 and module not in modset:
continue
# JIRA: MADLIB-1078 fix
# Skip pmml during install-check (when run without the -t option).
# We can still run install-check on pmml with '-t' option.
if not modset and module in ['pmml']:
continue
_info("> - %s" % module, verbose)
# Make a temp dir for this module (if doesn't exist)
cur_tmpdir = tmpdir + '/' + module + '/test' # tmpdir is a global variable
_make_dir(cur_tmpdir)
# Find the Python module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
else:
maddir_mod_py = maddir + "/modules"
# Find the SQL module dir (platform specific or generic)
if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
else:
maddir_mod_sql = maddir + "/modules"
# Prepare test schema
test_schema = "madlib_installcheck_%s" % (module)
_internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE; CREATE SCHEMA %s;" %
(test_schema, test_schema), True)
_internal_run_query("GRANT ALL ON SCHEMA %s TO %s;" %
(test_schema, test_user), True)
# Switch to test user and prepare the search_path
pre_sql = '-- Switch to test user:\n' \
'SET ROLE %s;\n' \
'-- Set SEARCH_PATH for install-check:\n' \
'SET search_path=%s,%s;\n' \
% (test_user, test_schema, schema)
# Loop through all test SQL files for this module
sql_files = maddir_mod_sql + '/' + module + '/test/*.sql_in'
for sqlfile in sorted(glob.glob(sql_files), reverse=True):
# work-around for HAWQ
algoname = os.path.basename(sqlfile).split('.')[0]
# run only algo specified
if module in modset and len(modset[module]) > 0 \
and algoname not in modset[module]:
continue
# Set file names
tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'
# If there is no problem with the SQL file
milliseconds = 0
# Run the SQL
run_start = datetime.datetime.now()
retval = _run_sql_file(schema, maddir_mod_py, module,
sqlfile, tmpfile, logfile, pre_sql)
# Runtime evaluation
run_end = datetime.datetime.now()
milliseconds = round((run_end - run_start).seconds * 1000 +
(run_end - run_start).microseconds / 1000)
# Check the exit status
if retval != 0:
result = 'FAIL'
keeplogs = True
# Since every single statement in the test file gets logged,
# an empty log file indicates an empty or a failed test
elif os.path.isfile(logfile) and os.path.getsize(logfile) > 0:
result = 'PASS'
# Otherwise
else:
result = 'ERROR'
# Output result
print "TEST CASE RESULT|Module: " + module + \
"|" + os.path.basename(sqlfile) + "|" + result + \
"|Time: %d milliseconds" % (milliseconds)
if result == 'FAIL':
_error("Failed executing %s" % tmpfile, False)
_error("Check the log at %s" % logfile, False)
# Cleanup test schema for the module
_internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE;" % (test_schema), True)
# Drop install-check user
_internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
_internal_run_query("DROP USER %s;" % (test_user), True)
# -----------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------
class RevTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_rev_num(self):
# not using assertGreaterEqual to keep Python 2.6 compatibility
self.assertTrue(_get_rev_num('4.3.10') >= _get_rev_num('4.3.5'))
self.assertTrue(_get_rev_num('1.9.10-dev') >= _get_rev_num('1.9.9'))
self.assertNotEqual(_get_rev_num('1.9.10-dev'), _get_rev_num('1.9.10'))
self.assertEqual(_get_rev_num('1.9.10'), [1, 9, 10])
self.assertEqual(_get_rev_num('1.0.0+20130313144700'), [1, 0, 0, '20130313144700'])
self.assertNotEqual(_get_rev_num('1.0.0+20130313144700'),
_get_rev_num('1.0.0-beta+exp.sha.5114f85'))
def test_is_rev_gte(self):
# 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
# 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0
self.assertTrue(_is_rev_gte([], []))
self.assertTrue(_is_rev_gte([1, 9], [1, None]))
self.assertFalse(_is_rev_gte([1, None], [1, 9]))
self.assertTrue(_is_rev_gte(_get_rev_num('4.3.10'), _get_rev_num('4.3.5')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.0'), _get_rev_num('1.9.0')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.1'), _get_rev_num('1.9.0')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.1'), _get_rev_num('1.9')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.0'), _get_rev_num('1.9.0-dev')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.1'), _get_rev_num('1.9-dev')))
self.assertTrue(_is_rev_gte(_get_rev_num('1.9.0-dev'), _get_rev_num('1.9.0-dev')))
self.assertTrue(_is_rev_gte([1, 9, 'rc', 1], [1, 9, 'dev', 0]))
self.assertFalse(_is_rev_gte(_get_rev_num('1.9.1'), _get_rev_num('1.10')))
self.assertFalse(_is_rev_gte([1, 9, 'dev', 1], [1, 9, 'rc', 0]))
self.assertFalse(_is_rev_gte([1, 9, 'alpha'], [1, 9, 'alpha', 0]))
self.assertFalse(_is_rev_gte([1, 9, 'alpha', 1], [1, 9, 'alpha', 'beta']))
self.assertFalse(_is_rev_gte([1, 9, 'alpha.1'], [1, 9, 'alpha.beta']))
self.assertFalse(_is_rev_gte([1, 9, 'beta', 2], [1, 9, 'beta', 4]))
self.assertFalse(_is_rev_gte([1, 9, 'beta', '1'], [1, 9, 'rc', '0']))
self.assertFalse(_is_rev_gte([1, 9, 'rc', 1], [1, 9, 0]))
self.assertFalse(_is_rev_gte([1, 9, '0.2'], [1, 9, '0.3']))
self.assertFalse(_is_rev_gte([1, 9, 'build2'], [1, 9, 'build3']))
self.assertFalse(_is_rev_gte(_get_rev_num('1.0.0+20130313144700'),
_get_rev_num('1.0.0-beta+exp.sha.5114f85')))
# ------------------------------------------------------------------------------
# Start Here
# ------------------------------------------------------------------------------
if __name__ == "__main__":
RUN_TESTS = False
if RUN_TESTS:
unittest.main()
else:
# Run main
main(sys.argv[1:])
# Optional log files cleanup
# keeplogs and tmpdir are global variables
if not keeplogs:
shutil.rmtree(tmpdir)
else:
print "INFO: Log files saved in " + tmpdir
|
rashmi815/incubator-madlib
|
src/madpack/madpack.py
|
Python
|
apache-2.0
| 62,969
|
[
"ORCA"
] |
faecb8f299ae07dba36294579e65510ccf79c407c76d2e5405e43fa661e1a026
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""test_streamer.py:
Test script for Streamer class.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
import threading
import numpy as np
import time
import os
import sys
print('[INFO] Using moose from %s' % moose.__file__)
all_done_ = False
# Poll the file to see that we are really writing to it.
def test_sanity( ):
a = moose.Table( '/t1' )
b = moose.Table( '/t1/t1' )
c = moose.Table( '/t1/t1/t1' )
print(a)
print(b)
print(c)
st = moose.Streamer( '/s' )
st.outfile = 'a.txt'
assert st.outfile == 'a.txt'
st.addTable( a )
assert( st.numTables == 1 )
st.addTable( b )
assert( st.numTables == 2 )
st.addTable( c )
assert( st.numTables == 3 )
st.addTable( c )
assert( st.numTables == 3 )
st.addTable( c )
assert( st.numTables == 3 )
st.removeTable( c )
assert( st.numTables == 2 )
st.removeTable( c )
assert( st.numTables == 2 )
st.removeTable( a )
assert( st.numTables == 1 )
st.removeTable( b )
assert( st.numTables == 0 )
st.removeTable( b )
assert( st.numTables == 0 )
print( 'Sanity test passed' )
st.addTables( [a, b, c ])
assert st.numTables == 3
st.removeTables( [a, a, c] )
assert st.numTables == 1
def buildSystem(outfile):
if moose.exists('/compt'):
moose.delete('/compt')
compt = moose.CubeMesh( '/compt' )
assert compt
r = moose.Reac( '/compt/r' )
a = moose.Pool( '/compt/a' )
a.concInit = 1
b = moose.Pool( '/compt/b' )
b.concInit = 2
c = moose.Pool( '/compt/c' )
c.concInit = 0.5
moose.connect( r, 'sub', a, 'reac' )
moose.connect( r, 'prd', b, 'reac' )
moose.connect( r, 'prd', c, 'reac' )
r.Kf = 0.1
r.Kb = 0.01
tabA = moose.Table2( '/compt/a/tab' )
tabB = moose.Table2( '/compt/tabB' )
tabC = moose.Table2( '/compt/tabB/tabC' )
print(tabA, tabB, tabC)
moose.connect( tabA, 'requestOut', a, 'getConc' )
moose.connect( tabB, 'requestOut', b, 'getConc' )
moose.connect( tabC, 'requestOut', c, 'getConc' )
# Now create a streamer and use it to write to a stream
st = moose.Streamer( '/compt/streamer' )
st.outfile = outfile
print("outfile set to: %s " % st.outfile )
st.addTable( tabA )
st.addTables( [ tabB, tabC ] )
assert st.numTables == 3
return st
def test_abit_more():
stCSV = buildSystem('data.csv')
moose.reinit( )
moose.start(100)
csvData = np.loadtxt(stCSV.outfile, skiprows=1)
stNumpy = buildSystem('data.npy')
moose.reinit()
moose.start(100)
npData = np.load(stNumpy.outfile)
assert csvData.shape[0] == npData.shape[0], npData.shape
assert csvData.shape[1] == len(npData.dtype) # cols
for i, name in enumerate(npData.dtype.names):
assert (csvData[:,i] == npData[name]).all()
def main( ):
test_sanity( )
test_abit_more( )
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_streamer.py
|
Python
|
gpl-3.0
| 3,284
|
[
"MOOSE"
] |
90b24bd18b1b96230363505cfc1cb1c2e7607f8b453d909d9b9cca21f030ec20
|
from __future__ import annotations
from dials.array_family import flex
from dials.array_family.flex import Binner
from dials.util.report import Array, Report, Table
def flex_ios(val, var):
"""
Compute I/sigma or return zero for each element.
"""
assert len(val) == len(var)
result = flex.double(len(val), 0)
indices = flex.size_t(range(len(val))).select(var > 0)
val = val.select(indices)
var = var.select(indices)
assert var.all_gt(0)
result.set_selected(indices, val / flex.sqrt(var))
return result
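# Illustrative behaviour (sketch, not part of the original module):
#   flex_ios(flex.double([4.0, 9.0]), flex.double([4.0, 0.0])) -> [2.0, 0.0]
#   (elements with non-positive variance keep the default value of zero)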
def generate_integration_report(experiment, reflections, n_resolution_bins=20):
"""
Generate the integration report
"""
from cctbx import crystal, miller
from dials.algorithms.statistics import (
pearson_correlation_coefficient,
spearman_correlation_coefficient,
)
def overall_report(data):
# Start by adding some overall numbers
report = {
"n": len(reflections),
"n_full": data["full"].count(True),
"n_partial": data["full"].count(False),
"n_overload": data["over"].count(True),
"n_ice": data["ice"].count(True),
"n_summed": data["sum"].count(True),
"n_fitted": data["prf"].count(True),
"n_integated": data["int"].count(True),
"n_invalid_bg": data["ninvbg"].count(True),
"n_invalid_fg": data["ninvfg"].count(True),
"n_failed_background": data["fbgd"].count(True),
"n_failed_summation": data["fsum"].count(True),
"n_failed_fitting": data["fprf"].count(True),
}
# Compute mean background
try:
report["mean_background"] = flex.mean(
data["background.mean"].select(data["int"])
)
except Exception:
report["mean_background"] = 0.0
# Compute mean I/Sigma summation
try:
report["ios_sum"] = flex.mean(data["intensity.sum.ios"].select(data["sum"]))
except Exception:
report["ios_sum"] = 0.0
# Compute mean I/Sigma profile fitting
try:
report["ios_prf"] = flex.mean(data["intensity.prf.ios"].select(data["prf"]))
except Exception:
report["ios_prf"] = 0.0
# Compute the mean profile correlation
try:
report["cc_prf"] = flex.mean(
data["profile.correlation"].select(data["prf"])
)
except Exception:
report["cc_prf"] = 0.0
# Compute the correlations between summation and profile fitting
try:
mask = data["sum"] & data["prf"]
Isum = data["intensity.sum.value"].select(mask)
Iprf = data["intensity.prf.value"].select(mask)
report["cc_pearson_sum_prf"] = pearson_correlation_coefficient(Isum, Iprf)
report["cc_spearman_sum_prf"] = spearman_correlation_coefficient(Isum, Iprf)
except Exception:
report["cc_pearson_sum_prf"] = 0.0
report["cc_spearman_sum_prf"] = 0.0
# Return the overall report
return report
def binned_report(binner, index, data):
# Create the indexers
indexer_all = binner.indexer(index)
indexer_sum = binner.indexer(index.select(data["sum"]))
indexer_prf = binner.indexer(index.select(data["prf"]))
indexer_int = binner.indexer(index.select(data["int"]))
# Add some stats by resolution
report = {
"bins": list(binner.bins()),
"n_full": list(indexer_all.sum(data["full"])),
"n_partial": list(indexer_all.sum(~data["full"])),
"n_overload": list(indexer_all.sum(data["over"])),
"n_ice": list(indexer_all.sum(data["ice"])),
"n_summed": list(indexer_all.sum(data["sum"])),
"n_fitted": list(indexer_all.sum(data["prf"])),
"n_integrated": list(indexer_all.sum(data["int"])),
"n_invalid_bg": list(indexer_all.sum(data["ninvbg"])),
"n_invalid_fg": list(indexer_all.sum(data["ninvfg"])),
"n_failed_background": list(indexer_all.sum(data["fbgd"])),
"n_failed_summation": list(indexer_all.sum(data["fsum"])),
"n_failed_fitting": list(indexer_all.sum(data["fprf"])),
}
# Compute mean background
try:
report["mean_background"] = list(
indexer_int.mean(data["background.mean"].select(data["int"]))
)
except Exception:
report["mean_background"] = [0.0] * len(binner)
# Compute mean I/Sigma summation
try:
report["ios_sum"] = list(
indexer_sum.mean(data["intensity.sum.ios"].select(data["sum"]))
)
except Exception:
report["ios_sum"] = [0.0] * len(binner)
# Compute mean I/Sigma profile fitting
try:
report["ios_prf"] = list(
indexer_prf.mean(data["intensity.prf.ios"].select(data["prf"]))
)
except Exception:
report["ios_prf"] = [0.0] * len(binner)
# Compute the mean profile correlation
try:
report["cc_prf"] = list(
indexer_prf.mean(data["profile.correlation"].select(data["prf"]))
)
except Exception:
report["cc_prf"] = [0.0] * len(binner)
try:
report["rmsd_xy"] = list(
indexer_sum.mean(data["xyz.rmsd"].select(data["sum"]))
)
except Exception:
report["rmsd_xy"] = [0.0] * len(binner)
# Return the binned report
return report
def resolution_bins(experiment, hkl, nbins):
# Create the crystal symmetry object
cs = crystal.symmetry(
space_group=experiment.crystal.get_space_group(),
unit_cell=experiment.crystal.get_unit_cell(),
)
# Create the resolution binner object
ms = miller.set(cs, hkl)
ms.setup_binner(n_bins=nbins)
binner = ms.binner()
brange = list(binner.range_used())
bins = [binner.bin_d_range(brange[0])[0]]
for i in brange:
bins.append(binner.bin_d_range(i)[1])
return flex.double(reversed(bins))
def select(data, indices):
# Select rows from columns
result = {key: value.select(indices) for key, value in data.items()}
return result
# Check the required columns are there
assert "miller_index" in reflections
assert "d" in reflections
assert "flags" in reflections
assert "bbox" in reflections
assert "xyzcal.px" in reflections
assert "partiality" in reflections
assert "intensity.sum.value" in reflections
assert "intensity.sum.variance" in reflections
# Get the flag enumeration
flags = flex.reflection_table.flags
# Get some keys from the data
data = {}
for key in [
"miller_index",
"xyzcal.px",
"xyzobs.px.value",
"d",
"bbox",
"background.mean",
"partiality",
"intensity.sum.value",
"intensity.sum.variance",
"intensity.prf.value",
"intensity.prf.variance",
"profile.correlation",
]:
if key in reflections:
data[key] = reflections[key]
# Compute some flag stuff
data["full"] = data["partiality"] > 0.997300203937
data["over"] = reflections.get_flags(flags.overloaded)
data["ice"] = reflections.get_flags(flags.in_powder_ring)
data["sum"] = reflections.get_flags(flags.integrated_sum)
data["prf"] = reflections.get_flags(flags.integrated_prf)
data["int"] = reflections.get_flags(flags.integrated, all=False)
data["ninvbg"] = reflections.get_flags(flags.background_includes_bad_pixels)
data["ninvfg"] = reflections.get_flags(flags.foreground_includes_bad_pixels)
data["fbgd"] = reflections.get_flags(flags.failed_during_background_modelling)
data["fsum"] = reflections.get_flags(flags.failed_during_summation)
data["fprf"] = reflections.get_flags(flags.failed_during_profile_fitting)
# Try to calculate the i over sigma for summation
data["intensity.sum.ios"] = flex_ios(
data["intensity.sum.value"], data["intensity.sum.variance"]
)
# Try to calculate the i over sigma for profile fitting
try:
data["intensity.prf.ios"] = flex_ios(
data["intensity.prf.value"], data["intensity.prf.variance"]
)
except Exception:
pass
# Try to calculate the rmsd between observation and prediction
try:
xcal, ycal, zcal = data["xyzcal.px"].parts()
xobs, yobs, zobs = data["xyzobs.px.value"].parts()
data["xyz.rmsd"] = flex.sqrt(flex.pow2(xcal - xobs) + flex.pow2(ycal - yobs))
except Exception:
pass
# Create the resolution binner
resolution_binner = Binner(
resolution_bins(experiment, data["miller_index"], n_resolution_bins)
)
# Create the frame binner object
try:
array_range = experiment.imageset.get_array_range()
except Exception:
array_range = (0, len(experiment.imageset))
frame_binner = Binner(
flex.int(range(array_range[0], array_range[1] + 1)).as_double()
)
# Create the overall report
overall = overall_report(data)
# Create high/low resolution reports
hl_binner = resolution_binner.indexer(data["d"])
high_summary = overall_report(select(data, hl_binner.indices(0)))
low_summary = overall_report(select(data, hl_binner.indices(n_resolution_bins - 1)))
high_summary["dmin"] = resolution_binner.bins()[0]
high_summary["dmax"] = resolution_binner.bins()[1]
low_summary["dmin"] = resolution_binner.bins()[n_resolution_bins - 1]
low_summary["dmax"] = resolution_binner.bins()[n_resolution_bins]
overall["dmin"] = high_summary["dmin"]
overall["dmax"] = low_summary["dmax"]
# Create the overall report
summary = {
"overall": overall,
"low": low_summary,
"high": high_summary,
}
# Create a report binned by resolution
resolution = binned_report(resolution_binner, data["d"], data)
# Create the report binned by image
image = binned_report(frame_binner, data["xyzcal.px"].parts()[2], data)
# Return the report
return {"summary": summary, "resolution": resolution, "image": image}
class IntegrationReport(Report):
"""
A class to store the integration report
"""
def __init__(self, experiments, reflections):
"""
Create the integration report
:param experiments: The experiment list
:param reflections: The reflection table
"""
# Initialise the report class
super().__init__()
# Split the tables by experiment id
tables = reflections.split_by_experiment_id()
assert len(tables) == len(experiments)
# Initialise the dictionary
report_list = []
# Generate an integration report for each experiment
for i, (expr, data) in enumerate(zip(experiments, tables)):
report_list.append(generate_integration_report(expr, data))
# Construct the per image table
table = Table()
table.name = "integration.image.summary"
table.title = "Summary vs image number"
table.cols.append(("id", "ID"))
table.cols.append(("image", "Image"))
table.cols.append(("n_full", "# full"))
table.cols.append(("n_part", "# part"))
table.cols.append(("n_over", "# over"))
table.cols.append(("n_ice", "# ice"))
table.cols.append(("n_sum", "# sum"))
table.cols.append(("n_prf", "# prf"))
table.cols.append(("ibg", "Ibg"))
table.cols.append(("ios_sum", "I/sigI\n (sum)"))
table.cols.append(("ios_prf", "I/sigI\n (prf)"))
table.cols.append(("cc_prf", "CC prf"))
table.cols.append(("rmsd_xy", "RMSD XY"))
for j, report in enumerate(report_list):
report = report["image"]
for i in range(len(report["bins"]) - 1):
table.rows.append(
[
"%d" % j,
"%d" % (report["bins"][i] + 1),
"%d" % report["n_full"][i],
"%d" % report["n_partial"][i],
"%d" % report["n_overload"][i],
"%d" % report["n_ice"][i],
"%d" % report["n_summed"][i],
"%d" % report["n_fitted"][i],
f"{report['mean_background'][i]:.2f}",
f"{report['ios_sum'][i]:.2f}",
f"{report['ios_prf'][i]:.2f}",
f"{report['cc_prf'][i]:.2f}",
f"{report['rmsd_xy'][i]:.2f}",
]
)
self.add_table(table)
# Construct the per resolution table
table = Table()
table.name = "integration.resolution.summary"
table.title = "Summary vs resolution"
table.cols.append(("id", "ID"))
table.cols.append(("dmin", "d min"))
table.cols.append(("n_full", "# full"))
table.cols.append(("n_part", "# part"))
table.cols.append(("n_over", "# over"))
table.cols.append(("n_ice", "# ice"))
table.cols.append(("n_sum", "# sum"))
table.cols.append(("n_prf", "# prf"))
table.cols.append(("ibg", "Ibg"))
table.cols.append(("ios_sum", "I/sigI\n (sum)"))
table.cols.append(("ios_prf", "I/sigI\n (prf)"))
table.cols.append(("cc_prf", "CC prf"))
table.cols.append(("rmsd_xy", "RMSD XY"))
for j, report in enumerate(report_list):
report = report["resolution"]
for i in range(len(report["bins"]) - 1):
table.rows.append(
[
"%d" % j,
f"{report['bins'][i]:.2f}",
"%d" % report["n_full"][i],
"%d" % report["n_partial"][i],
"%d" % report["n_overload"][i],
"%d" % report["n_ice"][i],
"%d" % report["n_summed"][i],
"%d" % report["n_fitted"][i],
f"{report['mean_background'][i]:.2f}",
f"{report['ios_sum'][i]:.2f}",
f"{report['ios_prf'][i]:.2f}",
f"{report['cc_prf'][i]:.2f}",
f"{report['rmsd_xy'][i]:.2f}",
]
)
self.add_table(table)
# Create the overall table
for j, report in enumerate(report_list):
report = report["summary"]
summary = report["overall"]
high = report["high"]
low = report["low"]
table = Table()
table.name = "integration.overall.summary"
table.title = "Summary for experiment %d" % j
table.cols.append(("item", "Item"))
table.cols.append(("overall", "Overall"))
table.cols.append(("low", "Low"))
table.cols.append(("high", "High"))
desc_fmt_key = [
("dmin", "%.2f", "dmin"),
("dmax", "%.2f", "dmax"),
("number fully recorded", "%d", "n_full"),
("number partially recorded", "%d", "n_partial"),
("number with invalid background pixels", "%d", "n_invalid_bg"),
("number with invalid foreground pixels", "%d", "n_invalid_fg"),
("number with overloaded pixels", "%d", "n_overload"),
("number in powder rings", "%d", "n_ice"),
("number processed with summation", "%d", "n_summed"),
("number processed with profile fitting", "%d", "n_fitted"),
("number failed in background modelling", "%d", "n_failed_background"),
("number failed in summation", "%d", "n_failed_summation"),
("number failed in profile fitting", "%d", "n_failed_fitting"),
("ibg", "%.2f", "mean_background"),
("i/sigi (summation)", "%.2f", "ios_sum"),
("i/sigi (profile fitting)", "%.2f", "ios_prf"),
("cc prf", "%.2f", "cc_prf"),
("cc_pearson sum/prf", "%.2f", "cc_pearson_sum_prf"),
("cc_spearman sum/prf", "%.2f", "cc_spearman_sum_prf"),
]
for desc, fmt, key in desc_fmt_key:
table.rows.append(
[desc, fmt % summary[key], fmt % low[key], fmt % high[key]]
)
self.add_table(table)
class ProfileModelReport(Report):
"""
A class to store the profile model report
"""
def __init__(self, experiments, fitter, reflections):
"""
        Create the profile model report
        :param experiments: The experiment list
        :param fitter: The profile fitter
:param reflections: The reflection table
"""
# Initialise the report class
super().__init__()
# Create the table
table = Table()
# Set the title
table.name = "profile.summary"
table.title = "Summary of profile model"
# Add the columns
table.cols.append(("id", "ID"))
table.cols.append(("profile", "Profile"))
table.cols.append(("created", "Created"))
table.cols.append(("x", "X (px)"))
table.cols.append(("y", "Y (px)"))
table.cols.append(("z", "Z (im)"))
table.cols.append(("n_reflections", "# reflections"))
# Create the summary for each profile model
for i in range(len(fitter)):
model = fitter[i]
for j in range(len(model)):
table.rows.append(
[
"%d" % i,
"%d" % j,
f"{model.valid(j)}",
f"{model.coord(j)[0]:.2f}",
f"{model.coord(j)[1]:.2f}",
f"{model.coord(j)[2]:.2f}",
"%d" % model.n_reflections(j),
]
)
# Add the table
self.add_table(table)
# Add the profiles
for i in range(len(fitter)):
model = fitter[i]
for j in range(len(model)):
if model.valid(j):
array = Array()
array.name = "profile.model.%d.%d" % (i, j)
array.title = "Profile model (id: %d, profile: %d)" % (i, j)
array.data = model.data(j)
self.add_array(array)
class ProfileValidationReport(Report):
"""
A class to store the profile validation report
"""
def __init__(self, experiments, profile_fitter, reflections, num_folds):
"""
        Create the profile validation report
        :param experiments: The experiment list
        :param profile_fitter: The profile fitter
        :param reflections: The reflection table
        :param num_folds: The number of validation folds
"""
# Initialise the report class
super().__init__()
# Create the table
table = Table()
# Set the title
table.name = "validation.summary"
table.title = "Summary of profile validation "
# Add the columns
table.cols.append(("id", "ID"))
table.cols.append(("subsample", "Sub-sample"))
table.cols.append(("n_valid", "# validated"))
table.cols.append(("cc", "<CC>"))
table.cols.append(("nrmsd", "<NRMSD>"))
# Split the reflections
reflection_tables = reflections.split_by_experiment_id()
assert len(reflection_tables) == len(experiments)
assert len(profile_fitter) == num_folds
# Create the summary for each profile model
for i in range(len(reflection_tables)):
reflection_table = reflection_tables[i]
reflection_table = reflection_table.select(
reflection_table.get_flags(reflection_table.flags.integrated_prf)
)
index = reflection_table["profile.index"]
cc = reflection_table["profile.correlation"]
nrmsd = reflection_table["profile.rmsd"]
for j in range(num_folds):
mask = index == j
num_validated = mask.count(True)
if num_validated == 0:
mean_cc = 0
mean_nrmsd = 0
else:
mean_cc = flex.mean(cc.select(mask))
mean_nrmsd = flex.mean(nrmsd.select(mask))
table.rows.append(
[
"%d" % i,
"%d" % j,
"%d" % num_validated,
f"{mean_cc:.2f}",
f"{mean_nrmsd:.2f}",
]
)
# Add the table
self.add_table(table)
|
dials/dials
|
algorithms/integration/report.py
|
Python
|
bsd-3-clause
| 21,120
|
[
"CRYSTAL"
] |
d096661c9f1d0cbc3d9f94bed489fa2f745666c52ad7df5b639bb631a1d29385
|
"""
============================
Neural Coding Reward Example
============================
A demonstration that uses Spykes' functionality to reproduce Ramkumar et al.'s
"Premotor and Motor Cortices Encode Reward."
"""
# Authors: Mayank Agrawal <mayankagrawal96@gmail.com>
#
# License: MIT
########################################################
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from spykes.plot.neurovis import NeuroVis
from spykes.io.datasets import load_reward_data
########################################################
# 0 Overview: Reproduce Figure
# -----------------------------
#
# 0.1 Article
# ~~~~~~~~~~~~~
#
# Ramkumar, Pavan, et al. "Premotor and Motor Cortices Encode Reward."
# PloS one 11.8 (2016)
#
# [`link to
# paper
# <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0160851>`__]
#
#
# 0.2 Dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Download all files [`here
# <https://figshare.com/articles/Ramkumar_et_al_2016_Premotor_and_motor_cortices_encode_reward/3573447>`__]
# However, we'll only be looking at Mihili_07112013.mat (Monkey M, Session 1)
# and Mihili_08062013.mat (Monkey M, Session 4)
#
# 0.3 Initialization
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
event = 'rewardTime'
condition = 'rewardBool'
window = [-500, 1500]
binsize = 10
########################################################
#
# 1 First Graph of Panel A
# --------------------
sess_one, sess_four = load_reward_data()
########################################################
#
# 1.1 Initiate all Neurons
# ~~~~~~~~~~~~~~~~~
#
def get_spike_time(raw_data, neuron_number):
spike_times = raw_data['alldays'][0]['PMd_units'][0][:]
spike_times = spike_times[neuron_number - 1][0][1:]
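    # Note (added for clarity): neuron_number is 1-based here (callers pass i + 1),
    # hence the "- 1"; the leading entry of each unit's spike array is skipped by [1:].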
spike_times = [i[0] for i in spike_times]
return spike_times
########################################################
def initiate_neurons(raw_data):
neuron_list = list()
for i in range((raw_data['alldays'][0]['PMd_units'][0][:]).shape[0]):
spike_times = get_spike_time(raw_data, i + 1)
# instantiate neuron
neuron = NeuroVis(spike_times, name='PMd %d' % (i + 1))
neuron_list.append(neuron)
return neuron_list
########################################################
neuron_list = initiate_neurons(sess_four)
########################################################
#
# 1.2 Get Event Times
# ~~~~~~~~~~~~~
def create_data_frame(raw_data):
data_df = pd.DataFrame()
uncertainty_conditions = list()
center_target_times = list()
reward_times = list()
reward_outcomes = list()
for i in range(raw_data['alldays'].shape[0]):
meta_data = raw_data['alldays'][i]['tt'][0]
uncertainty_conditions.append(meta_data[:, 2])
center_target_times.append(meta_data[:, 3])
reward_times.append(meta_data[:, 6])
reward_outcomes.append(meta_data[:, 7])
data_df['uncertaintyCondition'] = np.concatenate(uncertainty_conditions)
data_df['centerTargetTime'] = np.concatenate(center_target_times)
data_df['rewardTime'] = np.concatenate(reward_times)
data_df['rewardOutcome'] = np.concatenate(reward_outcomes)
data_df['rewardBool'] = data_df['rewardOutcome'].map(lambda s: s == 32)
# find time in between previous reward onset and start of current trial
# shouldn't be more than 1500ms
start_times = data_df['centerTargetTime']
last_reward_times = np.roll(data_df['rewardTime'], 1)
diffs = start_times - last_reward_times
diffs[0] = 0
data_df['consecutiveBool'] = diffs.map(lambda s: s <= 1.5)
return data_df[((data_df['uncertaintyCondition'] == 5.0) |
(data_df['uncertaintyCondition'] == 50.0)) &
data_df['consecutiveBool']]
########################################################
data_df = create_data_frame(sess_four)
print(len(data_df))
data_df.head()
########################################################
#
# 1.3 Match Peak Velocities
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
def find_velocities_in_range(raw_data, dataframe, min_vel, max_vel, min_time,
max_time):
all_velocities = raw_data['alldays'][0]['kin'][0]['vel'][0][0]
max_velocities = np.empty(len(dataframe))
peak_times = np.empty(len(dataframe))
for i in range(len(dataframe)):
# find time range for potential peak velocity
start_time = dataframe['rewardTime'][i] + .2
end_time = dataframe['rewardTime'][i] + 1.5
# find velocities in the time range
indices = (all_velocities[:, 0] >= start_time) & (
all_velocities[:, 0] <= end_time)
in_time = all_velocities[indices]
# find max velocity in given time range
velocity_norms = np.square(in_time[:, 1]) + np.square(in_time[:, 2])
max_velocity_index = np.argmax(velocity_norms)
max_velocities[i] = velocity_norms[max_velocity_index]**.5
peak_times[i] = in_time[max_velocity_index, 0]
dataframe['maxVelocity'] = max_velocities
dataframe['peakTimesDiff'] = peak_times - dataframe['rewardTime']
return dataframe[((dataframe['maxVelocity'] >= min_vel) &
(dataframe['maxVelocity'] <= max_vel)) &
((dataframe['peakTimesDiff'] >= min_time) &
(dataframe['peakTimesDiff'] <= max_time))]
########################################################
trials_df = find_velocities_in_range(
sess_four, data_df.reset_index(), 11, 16, .55, .95)
print(len(trials_df))
trials_df.head()
########################################################
#
# 1.4 Plot PSTHs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before Matching
neuron_number = 60
neuron = neuron_list[neuron_number - 1]
plt.figure(figsize=(10, 5))
psth = neuron.get_psth(event=event,
conditions=condition,
df=data_df,
window=[-500, 1500],
binsize=25,
event_name='Reward Time')
plt.title('neuron %s: Reward' % neuron.name)
plt.show()
########################################################
#
# After Velocity Matching
neuron_number = 60
neuron = neuron_list[neuron_number - 1]
plt.figure(figsize=(10, 5))
psth = neuron.get_psth(event=event,
conditions=condition,
df=trials_df,
window=[-500, 1500],
binsize=25,
event_name='Reward Time')
plt.title('neuron %s: Reward' % neuron.name)
plt.show()
########################################################
#
# 2 First Graph of Panel C
# --------------------
neuron_list = initiate_neurons(sess_one)
data_df = create_data_frame(sess_one)
########################################################
#
# 2.1 Normalize PSTHs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
def normalize_psth(neuron, dataframe):
psth = neuron.get_psth(event=event,
conditions=condition,
df=dataframe,
window=window,
binsize=binsize,
plot=False)
# find all max rates, and find max of max rates
max_rates = list()
for i, cond_id in enumerate(np.sort(psth['data'].keys())):
max_rates.append(np.amax(psth['data'][cond_id]['mean']))
max_rate = max(max_rates)
# divide all means by max to normalize
for i, cond_id in enumerate(np.sort(psth['data'].keys())):
psth['data'][cond_id]['mean'] /= max_rate
psth['data'][cond_id]['sem'] = 0 # population SEM calculated later
return psth
########################################################
neuron = neuron_list[0] # example
new_psth = normalize_psth(neuron, data_df)
neuron.plot_psth(new_psth, event, condition)
########################################################
#
# 2.2 Find Population Average
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
psth_dict = {}
for cond_id in np.sort(psth['data'].keys()):
psth_dict[cond_id] = list()
# add all normalized psth's
for neuron in neuron_list:
norm_psth = normalize_psth(neuron, data_df)
for cond_id in np.sort(psth['data'].keys()):
psth_dict[cond_id].append(norm_psth['data'][cond_id]['mean'])
for key in psth_dict:
psth_dict[key] = np.array(psth_dict[key])
# get base psth
base_neuron = neuron_list[0]
psth = normalize_psth(base_neuron, data_df)
# update mean and SEM to reflect population
for cond_id in np.sort(psth['data'].keys()):
psth['data'][cond_id]['mean'] = np.mean(psth_dict[cond_id], axis=0)
psth['data'][cond_id]['sem'] = (
np.var(psth_dict[cond_id], axis=0) / len(neuron_list))**.5
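# (Added note: the population error bars above are the standard error of the mean across
#  neurons, i.e. sqrt(variance over neurons / number of neurons).)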
########################################################
#
# 2.3 Plot PSTH
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plt.figure(figsize=(10, 5))
neuron.plot_psth(psth, event, condition)
plt.title("")
plt.show()
|
codekansas/spykes
|
examples/plot_neural_coding_reward_example.py
|
Python
|
mit
| 8,919
|
[
"NEURON"
] |
cc17f471338a6a30c7f98c557fba875b60a6141ded60cbd6ded946397abec675
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from .forms import CustomUserChangeForm, CustomUserCreationForm
from .models import CustomUser
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class CustomUserAdmin(admin.ModelAdmin):
"""
The default UserAdmin class, but with changes for our CustomUser
where `first_name` and `last_name` are replaced by `full_name` and
`short_name`
"""
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('full_name', 'short_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {'classes': ('wide',),
'fields': ('username', 'password1', 'password2')}),
)
form = CustomUserChangeForm
add_form = CustomUserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'full_name', 'short_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'full_name', 'short_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(CustomUserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
                'fields': admin.utils.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(CustomUserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
from django.conf.urls import url
return [
url(r'^(\d+)/password/$', self.admin_site.admin_view(
self.user_change_password))
] + super(CustomUserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(CustomUserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic()
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(CustomUserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
        user = get_object_or_404(self.get_queryset(request), pk=id)
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
            'is_popup': '_popup' in request.POST or '_popup' in request.GET,
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
return TemplateResponse(request, self.change_user_password_template or
'admin/auth/user/change_password.html',
context, current_app=self.admin_site.name)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and '_popup' not in request.POST:
request.POST['_continue'] = 1
return super(CustomUserAdmin, self).response_add(request, obj,
post_url_continue)
admin.site.register(CustomUser, CustomUserAdmin)
|
chouseknecht/galaxy
|
galaxy/accounts/admin.py
|
Python
|
apache-2.0
| 8,011
|
[
"Galaxy"
] |
b8d4cc0aa5ffd3eb7e898ef18b98c3284e4775a1195be8508715554966e67d8c
|
"""utils.py
Some general utilities used in various testing routines.
(C) 2016 The Baccus Lab
"""
import os
import numpy as np
from pyret.filtertools import _gaussian_function
from pyret import spiketools
def get_default_filter_size():
"""Return the default x, y, and temporal size of a spatiotemporal filter."""
return 10, 10, 50
def get_default_movie_frame():
"""Return the default movie frame used in testing movie-generating code."""
return 10
def create_default_fake_filter():
"""Return the default temporal, spatial, and spatiotemporal filters."""
nx, ny, nt = get_default_filter_size()
time = np.arange(nt)
return create_spatiotemporal_filter(nx, ny, nt)
def create_default_fake_spikes():
"""Return the default spike times and labels."""
spikes = np.arange(10)
labels = np.array((1, 1, 1, 1, 1, 2, 2, 2, 2, 2))
return spikes, labels
def create_default_fake_rates():
"""Return the default firing rates."""
spikes = np.arange(10)
time = np.linspace(0, 10, 100)
binned = spiketools.binspikes(spikes, time)
rate = spiketools.estfr(binned, time)
return rate
def create_temporal_filter(n, norm=True):
"""Returns a fake temporal linear filter that superficially resembles
those seen in retinal ganglion cells.
Parameters
----------
n : int
Number of time points in the filter.
norm : bool, optional
If True, normalize the filter to have unit 2-norm. Defaults to True.
Returns
-------
f : ndarray
The fake linear filter
"""
time_axis = np.linspace(0, 2 * np.pi, n)
filt = np.exp(-1. * time_axis) * np.sin(time_axis)
return filt / np.linalg.norm(filt) if norm else filt
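# A small usage sketch (not part of the original test utilities): the filter
# is a damped sine over [0, 2*pi], and with the default norm=True it has
# unit 2-norm.
def example_temporal_filter():
    """Illustrate the shape and normalization of the fake temporal filter."""
    filt = create_temporal_filter(50)
    assert filt.shape == (50,)
    assert np.isclose(np.linalg.norm(filt), 1.0)
    return filt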
def create_spatiotemporal_filter(nx, ny, nt, norm=True):
"""Returns a fake 3D spatiotemporal filter.
The filter is created as the outer product of a 2D gaussian with a fake
temporal filter as returned by `create_temporal_filter()`.
Parameters
----------
nx, ny : int
Number of points in the two spatial dimensions of the stimulus.
nt : int
Number of time points in the stimulus.
norm : bool, optional
If True, normalize the filter to have unit 2-norm. Defaults to True.
Returns
-------
t : ndarray
The temporal filter used.
s : ndarray
The spatial filter used.
f : ndarray
The full spatiotemporal linear filter, shaped (nt, nx, ny).
"""
temporal_filter = create_temporal_filter(nt, norm)
grid = np.meshgrid(np.arange(nx), np.arange(ny), indexing='ij')
points = np.array([each.flatten() for each in grid])
gaussian = _gaussian_function(points, int(ny / 2), int(nx / 2), 1, 0, 1).reshape(nx, ny)
if norm:
gaussian /= np.linalg.norm(gaussian)
# Outer product
filt = np.einsum('i,jk->ijk', temporal_filter, gaussian)
return (temporal_filter, gaussian,
filt / np.linalg.norm(filt) if norm else filt)
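# Shape sketch (illustrative, using the default sizes defined above): the
# einsum outer product yields a (nt, nx, ny) array whose temporal slices are
# scaled copies of the 2D Gaussian.
def example_spatiotemporal_shapes():
    """Illustrate the shapes returned by create_spatiotemporal_filter."""
    nx, ny, nt = get_default_filter_size()
    t, s, f = create_spatiotemporal_filter(nx, ny, nt)
    assert t.shape == (nt,)
    assert s.shape == (nx, ny)
    assert f.shape == (nt, nx, ny)
    return f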
|
baccuslab/pyret
|
tests/utils.py
|
Python
|
mit
| 3,002
|
[
"Gaussian"
] |
7d740ec1c7e2d167cd3ab4b80818fac3e31571dddcab84fbc8a8c0425f2e7dcc
|
from rdkit.Chem import MolSurf
from rdkit.Chem.EState import EState_VSA as RDKit_EState_VSA
from ._base import Descriptor
__all__ = ("LabuteASA", "PEOE_VSA", "SMR_VSA", "SlogP_VSA", "EState_VSA", "VSA_EState")
class LabuteASA(Descriptor):
r"""Labute's Approximate Surface Area descriptor(rdkit wrapper)."""
since = "1.0.0"
__slots__ = ()
explicit_hydrogens = False
def description(self):
return "Labute's Approximate Surface Area"
@classmethod
def preset(cls, version):
yield cls()
def __str__(self):
return self.__class__.__name__
def parameters(self):
return ()
def calculate(self):
return MolSurf.LabuteASA(self.mol)
rtype = float
class MoeTypeBase(Descriptor):
__slots__ = ("_k",)
explicit_hydrogens = False
_module = MolSurf
@classmethod
def preset(cls, version):
return map(cls, range(1, cls.k_max))
def description(self):
return self._fn.__doc__
@property
def _fn(self):
return getattr(self._module, str(self))
def __str__(self):
return self.__class__.__name__ + str(self._k)
def parameters(self):
return (self._k,)
def __init__(self, k=1):
assert 1 <= k <= self.k_max
self._k = k
def calculate(self):
return self._fn(self.mol)
rtype = float
class PEOE_VSA(MoeTypeBase):
r"""MOE type descriptors using gasteiger charge and surface area contribution(rdkit wrapper).
:type k: int
:param k: (:math:`1 <= k <= k_{\rm max}`)
"""
since = "1.0.0"
__slots__ = ()
k_max = 14
class SMR_VSA(MoeTypeBase):
r"""MOE type descriptors using Wildman-Crippen MR and surface area contribution(rdkit wrapper).
:type k: int
:param k: (:math:`1 <= k <= k_{\rm max}`)
"""
since = "1.0.0"
__slots__ = ()
k_max = 10
class SlogP_VSA(MoeTypeBase):
r"""MOE type descriptors using Wildman-Crippen LogP and surface area contribution(rdkit wrapper).
:type k: int
:param k: (:math:`1 <= k <= k_{\rm max}`)
"""
since = "1.0.0"
__slots__ = ()
k_max = 12
class EState_VSA(MoeTypeBase):
r"""MOE type descriptors using EState indices and surface area contribution(rdkit wrapper).
:type k: int
:param k: (:math:`1 <= k <= k_{\rm max}`)
"""
since = "1.0.0"
__slots__ = ()
_module = RDKit_EState_VSA
k_max = 11
class VSA_EState(MoeTypeBase):
r"""MOE type descriptors using EState indices and surface area contribution(rdkit wrapper).
:type k: int
:param k: (:math:`1 <= k <= k_{\rm max}`)
"""
since = "1.0.0"
__slots__ = ()
_module = RDKit_EState_VSA
k_max = 10
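# Hedged usage sketch (illustrative only): Calculator and Chem are assumed
# from the wider mordred/rdkit public APIs and are not defined in this
# module; a descriptor instance such as SlogP_VSA(2) evaluates one VSA bin
# on an RDKit molecule.
def example_usage():
    from rdkit import Chem
    from mordred import Calculator
    calc = Calculator([LabuteASA(), SlogP_VSA(2)])
    return calc(Chem.MolFromSmiles("c1ccccc1O"))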
|
mordred-descriptor/mordred
|
mordred/MoeType.py
|
Python
|
bsd-3-clause
| 2,718
|
[
"MOE",
"RDKit"
] |
8ead0ae1c3814f0ba84d1bbdb3adb1194a429e5876fbf183cbf540ee9668c262
|
# Copyright (c) 2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Google Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions being used for data processing."""
import collections
import collections.abc
import dataclasses
import itertools
from typing import Any, Dict, List, Optional, Union
from absl import logging
import numpy as np
import pysam
import tensorflow as tf
from deepconsensus.models import data_providers
from deepconsensus.utils import dc_constants
Issue = dc_constants.Issue
class SubreadGrouper(collections.abc.Iterator):
"""Returns all subreads belonging to a single zmw as a list."""
def __init__(self, subreads_to_ccs, reader_threads):
self.bam_reader = pysam.AlignmentFile(
subreads_to_ccs, check_sq=False, threads=reader_threads)
self.keep_iter = True
self.subread_group = []
# Setup subread group.
first_read = next(self.bam_reader)
self.zmw = first_read.get_tag('zm')
# Only add read if it is mapped.
if not first_read.is_unmapped:
self.subread_group.append(first_read)
def __next__(self) -> List[pysam.libcalignedsegment.AlignedSegment]:
if not self.keep_iter:
raise StopIteration
while self.keep_iter:
try:
read = next(self.bam_reader)
if read.is_unmapped:
continue
except StopIteration:
self.keep_iter = False
break
read_zmw = read.get_tag('zm')
if read_zmw == self.zmw:
self.subread_group.append(read)
elif read_zmw != self.zmw:
subreads_set = self.subread_group
self.subread_group = [read]
self.zmw = read_zmw
if subreads_set:
return subreads_set
if self.subread_group:
return self.subread_group
else:
raise StopIteration
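# Minimal usage sketch (illustrative only; the BAM path is a placeholder):
# iterating a SubreadGrouper yields one list of mapped subreads per ZMW, in
# the order they appear in the subreads-to-ccs BAM.
def example_iterate_zmws(subreads_to_ccs: str = 'subreads_to_ccs.bam'):
  for zmw_reads in SubreadGrouper(subreads_to_ccs, reader_threads=1):
    yield zmw_reads[0].get_tag('zm'), len(zmw_reads)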
def right_pad(arr: np.ndarray, length: int, value: Any) -> np.ndarray:
"""Right-pad an array with a given value.
Args:
arr: A numpy array (1 x n)
length: The length of arr after padding.
value: Pad value.
Returns:
A padded array
"""
# This function does not check for valid padding lengths.
pad_amt = length - len(arr)
return np.pad(arr, (0, pad_amt), 'constant', constant_values=value)[:length]
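# Illustrative only: right_pad keeps the first `length` entries, so padding a
# three-element array out to length five with -1 appends two -1 values.
def example_right_pad() -> np.ndarray:
  return right_pad(np.arange(3), 5, -1)  # -> array([ 0,  1,  2, -1, -1])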
@dataclasses.dataclass
class Read(collections.abc.Sequence):
"""Used to represent ccs alignments."""
name: str
bases: np.ndarray
cigar: np.ndarray
pw: np.ndarray
ip: np.ndarray
sn: np.ndarray
strand: dc_constants.Strand
ccs_idx: np.ndarray = np.empty(0)
# truth_idx and truth_range only used with label reads.
truth_idx: np.ndarray = np.empty(0)
# truth range is a dict containing contig, begin, end.
# It is not modified when slicing is performed.
  # The truth_range['contig'] and truth_idx are used to calculate
# label_coords from sliced regions.
# truth_range bounds are [begin, end) in keeping with bed format.
truth_range: Union[Dict[str, Any], None] = None
# Alignment Variables
seq_indices: np.ndarray = np.empty(0)
is_insertion: np.ndarray = np.empty(0)
seq_len: int = 0
idx_seq: int = 0
idx_spaced: int = 0
done: bool = False
def setup_spacing(self):
"""Set up an array for storing spaced indices."""
    self.seq_indices = np.zeros(len(self.bases), dtype=int)
self.is_insertion = self.cigar == dc_constants.PYSAM_CINS
self.seq_len = len(self.bases)
def move(self):
"""For each position, track its spaced index.
Example:
Sequence -> seq_indices -> put_spacing().
'AAAA' -> [0, 1, 3, 4] -> 'AA AA'
'MMIM'
"""
self.seq_indices[self.idx_seq] = self.idx_spaced
self.idx_seq += 1
self.idx_spaced += 1
def add_gap(self):
self.idx_spaced += 1
def is_out_of_bounds(self):
return self.idx_seq >= self.seq_len
def next_is_insertion(self):
# When insertions are encountered in the label, add them in
# to maintain spacing correctly.
if self.truth_range:
while self.is_insertion[self.idx_seq]:
# For label insertions, insert bases.
self.seq_indices[self.idx_seq] = self.idx_spaced
self.idx_seq += 1
self.idx_spaced += 1
return False
# pysam.CINS must be cast as an int or this block runs very slow.
return self.is_insertion[self.idx_seq]
def put_spacing(self, seq_len):
"""Generate spaced sequences and replace the originals."""
spaced_seq = np.repeat(dc_constants.GAP_OR_PAD, seq_len)
spaced_pw = np.zeros(seq_len, dtype=np.uint8)
spaced_ip = np.zeros(seq_len, dtype=np.uint8)
spaced_ccs_idx = np.repeat(-1, seq_len)
spaced_seq[self.seq_indices] = self.bases
spaced_pw[self.seq_indices] = self.pw
spaced_ip[self.seq_indices] = self.ip
spaced_ccs_idx[self.seq_indices] = self.ccs_idx
if self.truth_range:
spaced_cigar = np.repeat(dc_constants.PYSAM_CHARD_CLIP, seq_len)
spaced_cigar[self.seq_indices] = self.cigar
self.cigar = spaced_cigar
truth_pos = np.repeat(-1, seq_len)
truth_idx = np.arange(self.truth_range['begin'], self.truth_range['end'])
truth_aln_base = np.isin(self.cigar,
dc_constants.PYSAM_READ_ADVANCING_OPS)
assert len(truth_pos[truth_aln_base]) == len(truth_idx)
truth_pos[truth_aln_base] = truth_idx
self.truth_idx = truth_pos
self.bases = spaced_seq
self.pw = spaced_pw
self.ip = spaced_ip
self.ccs_idx = spaced_ccs_idx
@property
def bases_encoded(self) -> np.ndarray:
bases_encoded = np.ndarray(
self.bases.shape, dtype=dc_constants.NP_DATA_TYPE)
for k, base in enumerate(dc_constants.VOCAB):
bases_encoded[self.bases == base] = k
return bases_encoded
@property
def zmw(self) -> int:
return int(self.name.split('/')[1])
@property
def label_coords(self) -> str:
# Reports reference coordinates as chr:begin-end.
if self.is_label:
begin = self.label_bounds.start
end = self.label_bounds.stop
return f'{self.truth_range["contig"]}:{begin}-{end}'
return ''
@property
def is_label(self) -> bool:
return self.truth_range is not None
@property
def ccs_bounds(self) -> slice:
"""Return ccs min and max for a given slice."""
ccs_idx = np.ma.masked_array(self.ccs_idx, self.ccs_idx == -1)
if not ccs_idx.count():
# If no ccs coordinates are covered in this region, return an empty slice.
return slice(0, 0)
ccs_start = np.min(ccs_idx)
ccs_end = np.max(ccs_idx)
return slice(ccs_start, ccs_end)
@property
def label_bounds(self) -> slice:
"""Return label reference min and max positions for given slice."""
truth_idx = np.ma.masked_array(self.truth_idx, self.truth_idx == -1)
if not truth_idx.count():
# If no truth coords are covered in this region, return an empty slice.
return slice(0, 0)
truth_start = np.min(truth_idx)
truth_end = np.max(truth_idx)
return slice(truth_start, truth_end)
def ccs_slice(self, start, end) -> 'Read':
"""Perform slicing based on ccs coordinates. Coordinates are inclusive."""
# Note that these bounds are inclusive by design.
locs = np.where(np.logical_and(self.ccs_idx >= start,
self.ccs_idx <= end))[0]
if locs.any():
ccs_slice = slice(np.min(locs), np.max(locs) + 1)
else:
ccs_slice = slice(0, 0)
return Read(
name=self.name,
bases=self.bases[ccs_slice],
cigar=self.cigar[ccs_slice],
pw=self.pw[ccs_slice],
ip=self.ip[ccs_slice],
sn=self.sn,
strand=self.strand,
ccs_idx=self.ccs_idx[ccs_slice],
truth_idx=self.truth_idx[ccs_slice],
truth_range=self.truth_range)
def pad(self, pad_width):
return Read(
name=self.name,
bases=right_pad(self.bases, pad_width, dc_constants.GAP_OR_PAD),
cigar=right_pad(self.cigar, pad_width, dc_constants.PYSAM_CHARD_CLIP),
pw=right_pad(self.pw, pad_width, 0),
ip=right_pad(self.ip, pad_width, 0),
sn=self.sn,
strand=self.strand,
ccs_idx=right_pad(self.ccs_idx, pad_width, -1),
truth_idx=right_pad(self.truth_idx, pad_width, -1),
truth_range=self.truth_range)
def remove_gaps_and_pad(self, pad_width: int) -> Union['Read', None]:
"""Removes gaps from sequence and returns padded."""
# Useful for reducing label width.
keep = self.bases != dc_constants.GAP_OR_PAD
if sum(keep) > pad_width:
return None
return Read(
name=self.name,
bases=self.bases[keep],
cigar=self.cigar[keep],
pw=self.pw[keep],
ip=self.ip[keep],
sn=self.sn,
strand=self.strand,
ccs_idx=self.ccs_idx[keep],
truth_idx=self.truth_idx[keep],
truth_range=self.truth_range).pad(pad_width)
def __str__(self):
return ''.join(self.bases)
def __len__(self):
return len(self.bases)
def __getitem__(self, r_slice: Union[slice, int]) -> 'Read':
"""Implements slicing across all attributes."""
return Read(
name=self.name,
bases=self.bases[r_slice],
cigar=self.cigar[r_slice],
pw=self.pw[r_slice],
ip=self.ip[r_slice],
sn=self.sn,
strand=self.strand,
ccs_idx=self.ccs_idx[r_slice],
truth_idx=self.truth_idx[r_slice])
def __repr__(self):
if np.any(self.ccs_idx >= 0):
start = np.min(self.ccs_idx[self.ccs_idx >= 0])
end = np.max(self.ccs_idx, initial=0)
else:
start = 0
end = 0
return (f'Read({self.name}) : CCS({start}-{end}) L={len(self.bases)} ' +
self.label_coords).strip()
class DcConfig:
"""Option for controlling DcExample configuration and calculating indices."""
_HAS_DYNAMIC_ATTRIBUTES = True
# Features with n_rows = n_subreads.
n_subread_features = ['bases', 'pw', 'ip', 'strand']
fixed_height = 5 # ccs + sn
def __init__(self, max_passes: int, example_width: int, padding: int):
self.max_passes = max_passes
self.example_width = example_width
self.padding = padding
self.feature_rows = {
'bases': max_passes,
'pw': max_passes,
'ip': max_passes,
'strand': max_passes,
'ccs': 1,
'sn': 4
}
# Sets slices indicating rows for each feature type.
self.feature_indices = dict()
i_rows = 0
for k, v in self.feature_rows.items():
self.feature_indices[k] = slice(i_rows, i_rows + self.feature_rows[k])
setattr(self, k, i_rows)
i_rows += v
@classmethod
def from_shape(cls, subreads_shape, padding=0):
"""Construct DcConfig from subreads shape."""
height, width, _ = subreads_shape
max_passes = (height - cls.fixed_height) // len(DcConfig.n_subread_features)
if padding:
width = width - padding
return DcConfig(max_passes, width, padding)
def indices(self, feature: str, n_subreads: int = 0) -> slice:
"""Returns rows for a given feature."""
if n_subreads:
assert feature in DcConfig.n_subread_features
n_rows = min(n_subreads, self.max_passes)
return slice(getattr(self, feature), getattr(self, feature) + n_rows)
else:
assert feature not in DcConfig.n_subread_features
return slice(
getattr(self, feature),
getattr(self, feature) + self.feature_rows[feature])
@property
def tensor_height(self) -> int:
"""Returns total rows for tf.Example input."""
return sum(self.feature_rows.values())
@property
def tensor_width(self) -> int:
"""Returns total rows for tf.Example input."""
return self.example_width + self.padding
def to_dict(self):
"""Output configuration properties as dict."""
return {
# Encode values as strings to prevent downstream aggregation.
'max_passes': str(self.max_passes),
'example_width': str(self.example_width),
'padding': str(self.padding),
'tensor_height': str(self.tensor_height),
'tensor_width': str(self.tensor_width)
}
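# Small sketch of the row layout DcConfig implies (the numbers below are
# illustrative, not project defaults): four per-subread features of
# max_passes rows each, followed by one ccs row and four sn rows.
def example_dc_config_layout() -> slice:
  cfg = DcConfig(max_passes=20, example_width=100, padding=20)
  assert cfg.tensor_height == 4 * 20 + 1 + 4  # bases, pw, ip, strand + ccs + sn
  assert cfg.tensor_width == 100 + 20
  return cfg.indices('bases', n_subreads=8)  # rows 0-7 hold the base feature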
@dataclasses.dataclass
class DcExample:
"""Python container used to generate DeepConsensus tf.Example."""
name: str
reads: List[Read]
config: DcConfig
counter: collections.Counter = dataclasses.field(
default_factory=collections.Counter)
# Define cached variables.
_width: Optional[int] = None
_ccs_width: Optional[int] = None
@property
def contig(self):
if self.label:
return self.label.truth_range['contig']
return None
@property
def is_training(self) -> bool:
# If a label is in the last position we are in training mode.
return self.reads[-1].is_label
@property
def ccs(self) -> Read:
if self.is_training:
ccs_idx = -2
else:
ccs_idx = -1
return self.reads[ccs_idx]
@property
def label(self) -> Union[Read, None]:
if self.is_training:
return self.reads[-1]
return None
@property
def label_coords(self) -> str:
if self.is_training:
return self.label.label_coords
return ''
@property
def subreads(self) -> List[Read]:
if self.is_training:
return self.reads[:-2]
else:
return self.reads[:-1]
@property
def n_subreads(self) -> int:
# Returns the total number of subreads
return len(self.subreads)
@property
def keep_subreads(self) -> int:
# Returns usable number of subreads.
return min(self.config.max_passes, self.n_subreads)
@property
def width(self) -> int:
if self._width:
return self._width
else:
self._width = len(self.ccs.bases)
return self._width
@property
def ccs_width(self) -> int:
# Width - gaps at end.
if self._ccs_width:
return self._ccs_width
else:
self._ccs_width = len(str(self.ccs).rstrip())
return self._ccs_width
@property
def is_empty(self) -> bool:
return not (self.ccs.ccs_idx >= 0).any()
def iter_examples(self) -> 'DcExample':
"""Generates partitions from a given window."""
# Initiate counter
self.counter = collections.Counter()
example_width = self.config.example_width
padding = self.config.padding
total_width = example_width + padding
for start_pos in range(0, self.ccs_width, example_width):
window = self[start_pos:start_pos + example_width]
if start_pos > self.ccs_width:
break
if window.is_empty:
self.counter['n_examples_no_ccs_idx'] += 1
continue
# If the label extends beyond width + padding,
# remove gaps and right pad.
      # Gaps are helpful for visualizing alignments, but are not
      # needed during training, so overlong labels are de-gapped here.
if self.is_training and len(window.label.bases) > total_width:
adjusted_label = window.label.remove_gaps_and_pad(total_width)
# Even with this adjustment it is still possible for the label to
# be longer than the padded length. This is rare. Discard when training.
if not adjusted_label:
# Consider alternative solutions to incorporate these data.
self.counter['n_examples_label_overflow'] += 1
continue
self.counter['n_examples_adjusted_label'] += 1
window.reads[-1] = adjusted_label
# Apply padding:
reads = [x.pad(total_width) for x in window.reads]
yield DcExample(self.name, reads, self.config)
def stack_subread_feature(self, name):
"""Extract read feature and stack."""
max_passes = self.config.max_passes
return np.stack([getattr(x, name) for x in self.subreads[:max_passes]])
def extract_features(self):
"""Convert features to a 2D array."""
# Get shape (example_rows, width)
n_subreads = self.n_subreads
dims = (self.config.tensor_height, self.width)
data = np.zeros(shape=dims, dtype=dc_constants.NP_DATA_TYPE)
# Get feature indices.
bases_idx = self.config.indices('bases', n_subreads)
pw_idx = self.config.indices('pw', n_subreads)
ip_idx = self.config.indices('ip', n_subreads)
strand_idx = self.config.indices('strand', n_subreads)
ccs_idx = self.config.indices('ccs')
sn_idx = self.config.indices('sn')
# Set features.
data[bases_idx] = self.stack_subread_feature('bases_encoded')
data[pw_idx] = self.stack_subread_feature('pw')
data[ip_idx] = self.stack_subread_feature('ip')
# Format strand feature.
strand = self.stack_subread_feature('strand')
strand = strand.astype(dc_constants.NP_DATA_TYPE)
strand = np.repeat(np.expand_dims(strand, -1), self.width, -1)
data[strand_idx] = strand
data[ccs_idx] = self.ccs.bases_encoded
# Format sn rows.
sn = np.repeat(np.expand_dims(self.subreads[0].sn, -1), self.width, -1)
data[sn_idx] = sn
return np.expand_dims(data, -1)
def to_features_dict(self):
"""Convert DcExample to a dictionary for inference."""
data = self.extract_features()
# Add additional dimension.
features = {
'subreads': data,
'subreads/num_passes': self.keep_subreads,
'name': self.name,
'window_pos': self.ccs.ccs_bounds.start
}
return features
def tf_example(self) -> tf.train.Example:
"""Convert DcExample to tf.Example."""
data = self.extract_features()
# Add additional dimension.
example = tf.train.Example()
features = example.features
features.feature['subreads/encoded'].bytes_list.value.append(data.tobytes())
features.feature['subreads/shape'].int64_list.value.extend(data.shape)
features.feature['subreads/num_passes'].int64_list.value.append(
self.keep_subreads)
features.feature['name'].bytes_list.value.append(self.name.encode())
features.feature['window_pos'].int64_list.value.append(
self.ccs.ccs_bounds.start)
if self.is_training:
label = self.label.bases_encoded
features.feature['label/encoded'].bytes_list.value.append(label.tobytes())
features.feature['label/shape'].int64_list.value.extend(label.shape)
return example
def __getitem__(self, r_slice: Union[slice, int]) -> 'DcExample':
"""Implements windowed slicing of subreads and ccs_slicing of label."""
if isinstance(r_slice, int):
raise NotImplementedError
reads = self.subreads + [self.ccs]
reads = [x[r_slice] for x in reads]
if self.label:
ccs_slice = self.ccs[r_slice].ccs_bounds
reads.append(self.label.ccs_slice(ccs_slice.start, ccs_slice.stop))
return DcExample(self.name, reads, self.config)
def __repr__(self):
preview = self[:100]
start = preview.ccs.ccs_bounds.start
end = preview.ccs.ccs_bounds.stop
output = ''
output += (f'{self.name} CCS({start}-{end}) {self.label_coords}'.strip() +
f'\n{"-"*(preview.width+24)}\n')
for subread in self.subreads:
subread_range = subread.name.split('/')[2]
output += f'{subread_range:<20} {subread.strand} >{str(subread)}\n'
output += f'{"CCS":<22} >{str(preview.ccs)}\n'
if self.is_training:
label = str(self.label)
output += f'{"Label":<22} >{label}\n'
return output
def decode_bases(bases_encoded: np.ndarray) -> np.ndarray:
"""Reverses DcExample encode_bases."""
n_subreads, example_width = bases_encoded.shape
bases = np.stack([np.repeat(dc_constants.GAP_OR_PAD, example_width)] *
n_subreads)
for k, base in enumerate(dc_constants.VOCAB):
bases[bases_encoded == k] = base
return bases
def from_features_dict(features_dict: Dict[str, Any],
padding: int = 0) -> DcExample:
"""Converts features_dict partially back to a DcExample object for tests."""
dc_config = DcConfig.from_shape(
features_dict['subreads/shape'], padding=padding)
data = np.squeeze(features_dict['subreads'])
name = features_dict['name']
n_subreads = features_dict['subreads/num_passes']
# Note: The ccs start position is correct, but indices
# may not be accurate beyond the first position.
ccs_start_pos = features_dict['window_pos']
# Get feature indices.
bases_idx = dc_config.indices('bases', n_subreads)
pw_idx = dc_config.indices('pw', n_subreads)
ip_idx = dc_config.indices('ip', n_subreads)
strand_idx = dc_config.indices('strand', n_subreads)
ccs_idx = dc_config.indices('ccs')
sn_idx = dc_config.indices('sn')
# Convert 2D array back to features.
bases = decode_bases(data[bases_idx])
pw = data[pw_idx]
ip = data[ip_idx]
strand = data[strand_idx]
ccs = decode_bases(data[ccs_idx])[0]
sn = data[sn_idx][:, 1]
ccs_idx = np.repeat(-1, dc_config.tensor_width)
ccs_end_pos = ccs_start_pos + dc_config.example_width
ccs_idx[0:dc_config.example_width] = np.arange(ccs_start_pos, ccs_end_pos)
movie, zmw, _ = name.split('/')
# Generate DcExample
read_set = []
for i in range(n_subreads):
read = Read(
f'{movie}/{zmw}/{i}',
bases=bases[i],
cigar=np.repeat(np.uint8(pysam.CMATCH), dc_config.example_width),
pw=pw[i],
ip=ip[i],
sn=sn,
strand=dc_constants.Strand(int(strand[i][0])),
ccs_idx=ccs_idx)
read_set.append(read)
ccs_read = Read(
name=name,
bases=ccs,
cigar=np.repeat(np.uint8(pysam.CMATCH), dc_config.example_width),
pw=np.repeat(np.uint8(0), dc_config.example_width),
ip=np.repeat(np.uint8(0), dc_config.example_width),
sn=np.repeat(0, 4),
strand=dc_constants.Strand.UNKNOWN,
ccs_idx=ccs_idx)
read_set.append(ccs_read)
return DcExample(name=name, reads=read_set, config=dc_config)
def set_feature(feature, shape):
"""Read in feature and set shape."""
feature = np.frombuffer(feature, dtype=dc_constants.NP_DATA_TYPE)
feature = feature.reshape(shape)
return feature
def tf_example_to_features_dict(tf_example_proto_str, inference=False):
"""Convert tf.Example to features_dict."""
features = data_providers.parse_example(
tf_example_proto_str, inference=inference)
for key, val in features.items():
features[key] = val.numpy()
# Cast types
features['name'] = str(features['name'][0], 'UTF-8')
features['subreads/num_passes'] = int(features['subreads/num_passes'])
features['subreads'] = set_feature(features['subreads/encoded'],
features['subreads/shape'])
del features['subreads/encoded']
if not inference:
features['label'] = set_feature(features['label/encoded'],
features['label/shape'])
del features['label/encoded']
return features
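# Hedged round-trip sketch (not part of the pipeline itself; assumes
# parse_example accepts a serialized proto string, as the argument name
# suggests): a DcExample serialized with tf_example() parses back into a
# features dict, and from_features_dict() rebuilds an approximate DcExample
# for inspection.
def example_round_trip(dc_example: DcExample, padding: int = 0) -> DcExample:
  proto_str = dc_example.tf_example().SerializeToString()
  features = tf_example_to_features_dict(proto_str, inference=True)
  return from_features_dict(features, padding=padding)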
def fetch_ccs_seq(ccs_seqname: str,
ccs_fasta: pysam.libcfaidx.FastaFile) -> Read:
"""Fetches a ccs sequence by name."""
ccs_seq = ccs_fasta.fetch(ccs_seqname)
ccs_seq = np.array(ccs_seq, 'c')
# The ccs ref sequences are 1:len(ccs_seq).
return Read(
name=ccs_seqname,
bases=ccs_seq,
cigar=np.repeat(np.uint8(pysam.CMATCH), len(ccs_seq)),
pw=np.repeat(np.uint8(0), len(ccs_seq)),
ip=np.repeat(np.uint8(0), len(ccs_seq)),
sn=np.repeat(0, 4),
strand=dc_constants.Strand.UNKNOWN,
ccs_idx=np.arange(len(ccs_seq)))
def fetch_label_alignment(
ccs_seqname: str, truth_to_ccs: pysam.AlignmentFile,
truth_range: Dict[str, Any]) -> Union[dc_constants.Issue, Read]:
"""Fetches a label aligned to ccs sequence."""
try:
truth_alignment = next(truth_to_ccs.fetch(ccs_seqname))
except (ValueError, StopIteration):
return Issue.TRUTH_ALIGNMENT_NOT_FOUND
if truth_alignment.is_supplementary:
return Issue.SUPP_TRUTH_ALIGNMENT
truth_alignment = expand_clip_indent(truth_alignment, truth_range)
return truth_alignment
def read_truth_bedfile(truth_bed: str) -> Dict[str, Dict[str, Any]]:
"""Reads in complete truth bed file and returns dict."""
bed_coords = {}
with open(truth_bed, 'r') as bedfile:
for line in bedfile:
contig, begin, end, ccs_seqname = line.strip().split('\t')[:4]
bed_record = {'contig': contig, 'begin': int(begin), 'end': int(end)}
bed_coords[ccs_seqname] = bed_record
return bed_coords
def read_truth_split(split_fname: str) -> Dict[str, str]:
"""Reads in split bed file and returns dict."""
contig_split = {}
split_regions = {}
for i in dc_constants.HUMAN_TRAIN_REGIONS:
split_regions[i] = 'train'
for i in dc_constants.HUMAN_EVAL_REGIONS:
split_regions[i] = 'eval'
for i in dc_constants.HUMAN_TEST_REGIONS:
split_regions[i] = 'test'
with open(split_fname, 'r') as f:
for line in f:
contig, chrom = line.split()
if chrom in split_regions:
contig_split[contig] = split_regions[chrom]
return contig_split
def expand_clip_indent(read: pysam.AlignedSegment,
truth_range: Union[Dict[str, Any], None] = None) -> Read:
"""Adds GAP_OR_PAD tokens and clips reads.
For both subreads and label:
* Expand sequence by placing gaps where deletions are present in alignment.
* Remove bases that are part of soft-clips.
* Indent alignment if start position is > 0.
* Reverse ip/pw values when the strand is reverse.
Args:
read: a pysam aligned segment representing a subread, ccs, or label aln.
truth_range: truth genome alignment coordinates. If supplied, it is
assumed this is the label alignment.
Returns:
ExpandedRead
"""
# Extract read and reference indices.
aligned_pairs = read.get_aligned_pairs()
read_idx = np.array([x[0] if x[0] is not None else -1 for x in aligned_pairs])
ccs_idx = np.array([x[1] if x[1] is not None else -1 for x in aligned_pairs])
aln_len = len(read_idx)
# Create empty expanded read objects.
new_seq = np.repeat(dc_constants.GAP_OR_PAD, aln_len)
new_pw = np.repeat(np.uint8(0), aln_len)
new_ip = np.repeat(np.uint8(0), aln_len)
# Fill read objects based on aligned read idx positions.
new_seq[read_idx >= 0] = list(read.seq)
if read.is_reverse:
strand = dc_constants.Strand.REVERSE
else:
strand = dc_constants.Strand.FORWARD
# pw/ip values are never set for labels.
# truth_range is used to test if we are working with a label Read.
if not truth_range:
# Reverse ip/pw values if the strand is reversed.
pw_vals = read.get_tag('pw')
ip_vals = read.get_tag('ip')
if strand == dc_constants.Strand.REVERSE:
pw_vals = pw_vals[::-1]
ip_vals = ip_vals[::-1]
new_pw[read_idx >= 0] = pw_vals
new_ip[read_idx >= 0] = ip_vals
sn = np.array(read.get_tag('sn'))
else:
sn = np.empty(0)
# Extract additional read properties.
cigar_seq = itertools.chain.from_iterable([[x] * y for x, y in read.cigar])
new_cigar = np.fromiter(cigar_seq, dtype=np.uint8)
# Filter hard_clip from cigar.
new_cigar = new_cigar[new_cigar != dc_constants.PYSAM_CHARD_CLIP]
# Trim sequence if it is soft-padded.
if np.sum(new_cigar == dc_constants.PYSAM_CSOFT_CLIP) > 0:
new_seq[new_cigar ==
dc_constants.PYSAM_CSOFT_CLIP] = dc_constants.GAP_OR_PAD
# TODO: binary search ignoring -1 vals here.
qstart = np.where(read_idx == read.query_alignment_start)[0][0]
qend = np.where(read_idx == read.query_alignment_end - 1)[0][0] + 1
# Trim soft-padded segments from truth regions.
if truth_range:
op, op_len = read.cigartuples[0]
if op == dc_constants.PYSAM_CSOFT_CLIP:
truth_range['begin'] = truth_range['begin'] + op_len
op, op_len = read.cigartuples[-1]
if op == dc_constants.PYSAM_CSOFT_CLIP:
truth_range['end'] = truth_range['end'] - op_len
new_seq = new_seq[qstart:qend]
new_pw = new_pw[qstart:qend]
new_ip = new_ip[qstart:qend]
new_cigar = new_cigar[qstart:qend]
ccs_idx = ccs_idx[qstart:qend]
# Indent sequence
if read.pos:
new_seq = np.insert(new_seq, 0, [dc_constants.GAP_OR_PAD] * read.pos)
# Add N cigar op at position 0 to indicate indent.
new_cigar = np.insert(new_cigar, 0,
np.repeat(int(pysam.CREF_SKIP), read.pos))
new_pw = np.insert(new_pw, 0, np.repeat(0, read.pos))
new_ip = np.insert(new_ip, 0, np.repeat(0, read.pos))
ccs_idx = np.insert(ccs_idx, 0, np.repeat(-1, read.pos))
return Read(
name=read.qname,
bases=new_seq,
cigar=new_cigar,
pw=new_pw,
ip=new_ip,
sn=sn,
strand=strand,
ccs_idx=ccs_idx,
truth_range=truth_range)
def space_out_subreads(subreads: List[Read]) -> List[Read]:
"""Spaces out subreads to make room for insertions in any subset of them."""
for r in subreads:
r.setup_spacing()
while not all([r.done for r in subreads]):
# This loops over bases in all subreads at once, from left to right.
any_insertions = False
for r in subreads:
if r.done:
continue
if r.next_is_insertion():
any_insertions = True
break
for r in subreads:
if r.done:
continue
if any_insertions and not r.next_is_insertion():
# If other reads have insertions, but this one does NOT, add a gap to
# this read to make space.
r.add_gap()
else:
# In all other cases, just take the next base and move on.
r.move()
if r.is_out_of_bounds():
# Finally, format reads with spacing.
r.done = True
# Right pad all spaced sequences so they have the same length.
max_len = max([r.idx_spaced for r in subreads])
for r in subreads:
r.put_spacing(max_len)
return subreads
def create_proc_feeder(subreads_to_ccs: str,
ccs_fasta: str,
dc_config: DcConfig,
truth_bed: Optional[str] = None,
truth_to_ccs: Optional[str] = None,
truth_split: Optional[str] = None,
limit: int = 0,
bam_reader_threads: int = 1):
"""Creates a generator to feed subread process jobs to a multiprocess pool."""
main_counter = collections.Counter()
# Initiate files
subread_grouper = SubreadGrouper(subreads_to_ccs, bam_reader_threads)
ccs_fasta = pysam.FastaFile(ccs_fasta)
is_training = truth_bed and truth_to_ccs and truth_split
if is_training:
# Load files required for training.
truth_to_ccs_bam = pysam.AlignmentFile(truth_to_ccs, require_index=True)
truth_ref_coords = read_truth_bedfile(truth_bed)
truth_split_dict = read_truth_split(truth_split)
def proc_feeder():
for read_set in subread_grouper:
main_counter['n_zmw_processed'] += 1
subreads = list(map(expand_clip_indent, read_set))
ccs_seqname = '/'.join(subreads[0].name.split('/')[:2] + ['ccs'])
# Fetch ccs sequence and append to subread set.
ccs_seq = fetch_ccs_seq(ccs_seqname, ccs_fasta)
subreads.append(ccs_seq)
if is_training:
# Fetch truth to ccs alignment.
truth_range = truth_ref_coords.get(ccs_seqname, None)
if not truth_range:
logging.info('No truth_range defined for %s.', ccs_seqname)
main_counter['n_zmw_missing_truth_range'] += 1
continue
label = fetch_label_alignment(ccs_seqname, truth_to_ccs_bam,
truth_range)
if label == Issue.TRUTH_ALIGNMENT_NOT_FOUND:
logging.info('Unable to fetch label alignment for %s.', ccs_seqname)
main_counter['n_zmw_no_label_alignment'] += 1
continue
elif label == Issue.SUPP_TRUTH_ALIGNMENT:
main_counter['n_zmw_truth_label_supp_alignment'] += 1
continue
subreads.append(label)
# pytype: disable=attribute-error
split = truth_split_dict.get(truth_range['contig'], None)
# pytype: enable=attribute-error
if not split:
logging.info('No split defined for %s.', ccs_seqname)
main_counter['n_zmw_missing_contig_split'] += 1
continue
else:
split = 'inference'
main_counter[f'n_zmw_{split}'] += 1
main_counter['n_zmw_pass'] += 1
yield (subreads, ccs_seqname, dc_config, split)
if limit and main_counter['n_zmw_pass'] >= limit:
break
return proc_feeder, main_counter
def subreads_to_dc_example(subreads: List[Read], ccs_seqname: str,
dc_config: DcConfig) -> DcExample:
"""Process subreads and return a DcExample object."""
aln_reads = space_out_subreads(subreads)
dc_example = DcExample(name=ccs_seqname, reads=aln_reads, config=dc_config)
return dc_example
|
google/deepconsensus
|
deepconsensus/preprocess/utils.py
|
Python
|
bsd-3-clause
| 33,756
|
[
"pysam"
] |
3e17fa5dbd03bf5d0ba08cea84343ea0b2b37bcd285768a6b2060eea27c19281
|
"""
DIRAC Times module
Support for basic Date and Time operations
based on system datetime module.
It provides common interface to UTC timestamps,
converter to string types and back.
The following datetime classes are used in the returned objects:
- dateTime = datetime.datetime
- date = datetime.date
- time = datetime.timedelta
Useful timedelta constants are also provided to
define time intervals.
Notice: datetime.timedelta objects allow multiplication and division by integer
but not by float. Thus:
- DIRAC.Times.second * 1.5 is not allowed
- DIRAC.Times.second * 3 / 2 is allowed
A timeInterval class provides a method to check
if a given datetime is in the defined interval.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time as nativetime
import datetime
import six
import sys
__RCSID__ = "$Id$"
# Some useful constants for time operations
microsecond = datetime.timedelta(microseconds=1)
second = datetime.timedelta(seconds=1)
minute = datetime.timedelta(minutes=1)
hour = datetime.timedelta(hours=1)
day = datetime.timedelta(days=1)
week = datetime.timedelta(days=7)
dt = datetime.datetime(2000, 1, 1)
def timeThis(method):
""" Function to be used as a decorator for timing other functions/methods
"""
def timed(*args, **kw):
""" What actually times
"""
ts = nativetime.time()
result = method(*args, **kw)
if sys.stdout.isatty():
return result
te = nativetime.time()
pre = dt.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC ")
try:
pre += args[0].log.getName() + '/' + args[0].log.getSubName() + ' TIME: ' + args[0].transString
except AttributeError:
try:
pre += args[0].log.getName() + ' TIME: ' + args[0].transString
except AttributeError:
try:
pre += args[0].log.getName() + '/' + args[0].log.getSubName() + ' TIME: '
except AttributeError:
pre += 'TIME: '
except IndexError:
pre += 'TIME: '
argsLen = ''
if args:
try:
if isinstance(args[1], (list, dict)):
argsLen = "arguments len: %d" % len(args[1])
except IndexError:
if kw:
try:
if isinstance(list(list(kw.items())[0])[1], (list, dict)):
argsLen = "arguments len: %d" % len(list(list(kw.items())[0])[1])
except IndexError:
argsLen = ''
print("%s Exec time ===> function %r %s -> %2.2f sec" % (pre, method.__name__, argsLen, te - ts))
return result
return timed
def dateTime():
"""
Return current UTC datetime, as datetime.datetime object
"""
return dt.utcnow()
def date(myDateTime=None):
"""
Return current UTC date, as datetime.date object
  if a _dateTimeType is passed as argument its associated date is returned
"""
if isinstance(myDateTime, _dateTimeType):
return myDateTime.date()
return dateTime().date()
def time(myDateTime=None):
"""
Return current UTC time, as datetime.time object
  if a _dateTimeType is passed as argument its associated time is returned
"""
if not isinstance(myDateTime, _dateTimeType):
myDateTime = dateTime()
return myDateTime - datetime.datetime(myDateTime.year, myDateTime.month, myDateTime.day)
def toEpoch(dateTimeObject=None):
"""
Get seconds since epoch
"""
if not dateTimeObject:
dateTimeObject = dateTime()
return nativetime.mktime(dateTimeObject.timetuple())
def fromEpoch(epoch):
"""
Get datetime object from epoch
"""
return dt.fromtimestamp(epoch)
def to2K(dateTimeObject=None):
"""
  Get seconds, with microsecond precision, since 2K
"""
if not dateTimeObject:
dateTimeObject = dateTime()
delta = dateTimeObject - dt
return delta.days * 86400 + delta.seconds + delta.microseconds / 1000000.
def from2K(seconds2K=None):
"""
Get date from seconds since 2K
"""
if not seconds2K:
seconds2K = to2K(dt)
return dt + int(seconds2K) * second + int(seconds2K % 1 * 1000000) * microsecond
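# Minimal round-trip sketch (illustrative only): to2K counts seconds since
# 2000-01-01 with microsecond precision, so feeding its result back through
# from2K should recover the original datetime up to rounding.
def exampleTo2KRoundTrip():
  now = dateTime()
  return from2K(to2K(now)) - now  # expected to be a (nearly) zero timedelta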
def toString(myDate=None):
"""
Convert to String
if argument type is neither _dateTimeType, _dateType, nor _timeType
the current dateTime converted to String is returned instead
  Notice: datetime.timedelta objects are converted to strings using the format:
[day] days [hour]:[min]:[sec]:[microsec]
where hour, min, sec, microsec are always positive integers,
and day carries the sign.
To keep internal consistency we are using:
[hour]:[min]:[sec]:[microsec]
  where min, sec, microsec are always positive integers and hour carries the
sign.
"""
if isinstance(myDate, _dateTimeType):
return str(myDate)
elif isinstance(myDate, _dateType):
return str(myDate)
elif isinstance(myDate, _timeType):
return '%02d:%02d:%02d.%06d' % (myDate.days * 24 + myDate.seconds / 3600,
myDate.seconds % 3600 / 60,
myDate.seconds % 60,
myDate.microseconds)
else:
return toString(dateTime())
def fromString(myDate=None):
"""
  Convert date/time/datetime String back to the appropriate objects
  The format of the string is assumed to be that returned by the toString method.
See notice on toString method
On Error, return None
"""
if isinstance(myDate, six.string_types):
if myDate.find(' ') > 0:
dateTimeTuple = myDate.split(' ')
dateTuple = dateTimeTuple[0].split('-')
try:
return (datetime.datetime(year=dateTuple[0],
month=dateTuple[1],
day=dateTuple[2]) +
fromString(dateTimeTuple[1]))
# return dt.combine( fromString( dateTimeTuple[0] ),
# fromString( dateTimeTuple[1] ) )
except Exception:
try:
return (datetime.datetime(year=int(dateTuple[0]),
month=int(dateTuple[1]),
day=int(dateTuple[2])) +
fromString(dateTimeTuple[1]))
except ValueError:
return None
# return dt.combine( fromString( dateTimeTuple[0] ),
# fromString( dateTimeTuple[1] ) )
elif myDate.find(':') > 0:
timeTuple = myDate.replace('.', ':').split(':')
try:
if len(timeTuple) == 4:
return datetime.timedelta(hours=int(timeTuple[0]),
minutes=int(timeTuple[1]),
seconds=int(timeTuple[2]),
microseconds=int(timeTuple[3]))
elif len(timeTuple) == 3:
try:
return datetime.timedelta(hours=int(timeTuple[0]),
minutes=int(timeTuple[1]),
seconds=int(timeTuple[2]),
microseconds=0)
except ValueError:
return None
else:
return None
except Exception:
return None
elif myDate.find('-') > 0:
dateTuple = myDate.split('-')
try:
return datetime.date(int(dateTuple[0]), int(dateTuple[1]), int(dateTuple[2]))
except Exception:
return None
return None
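# Small round-trip sketch (illustrative only): a datetime serialised with
# toString() parses back with fromString(), while a malformed string yields
# None instead of raising.
def exampleStringRoundTrip():
  now = dateTime()
  return fromString(toString(now)) == now and fromString('garbage') is None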
class timeInterval(object):
"""
Simple class to define a timeInterval object able to check if a given
dateTime is inside
"""
def __init__(self, initialDateTime, intervalTimeDelta):
"""
Initialization method, it requires the initial dateTime and the
timedelta that define the limits.
The upper limit is not included thus it is [begin,end)
If not properly initialized an error flag is set, and subsequent calls
to any method will return None
"""
if (not isinstance(initialDateTime, _dateTimeType) or
not isinstance(intervalTimeDelta, _timeType)):
self.__error = True
return None
self.__error = False
if intervalTimeDelta.days < 0:
self.__startDateTime = initialDateTime + intervalTimeDelta
self.__endDateTime = initialDateTime
else:
self.__startDateTime = initialDateTime
self.__endDateTime = initialDateTime + intervalTimeDelta
def includes(self, myDateTime):
"""
"""
if self.__error:
return None
if not isinstance(myDateTime, _dateTimeType):
return None
if myDateTime < self.__startDateTime:
return False
if myDateTime >= self.__endDateTime:
return False
return True
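# Usage sketch (illustrative only): a one-hour interval starting now includes
# a datetime 30 minutes ahead but not one 2 hours ahead, since the upper
# bound is excluded.
def exampleTimeInterval():
  start = dateTime()
  interval = timeInterval(start, hour)
  return interval.includes(start + 30 * minute), interval.includes(start + 2 * hour)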
def queryTime(f):
""" Decorator to measure the function call time
"""
def measureQueryTime(*args, **kwargs):
start = nativetime.time()
result = f(*args, **kwargs)
if result['OK'] and 'QueryTime' not in result:
result['QueryTime'] = nativetime.time() - start
return result
return measureQueryTime
_dateTimeType = type(dateTime())
_dateType = type(date())
_timeType = type(time())
_allTimeTypes = (_dateTimeType, _timeType)
_allDateTypes = (_dateTimeType, _dateType)
_allTypes = (_dateTimeType, _dateType, _timeType)
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/Time.py
|
Python
|
gpl-3.0
| 9,140
|
[
"DIRAC"
] |
fc49e643db17d58a2d708b9f5e6cca913b5d760208f1d0013c221790f5c2f69b
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 15:32:10 2017
@author: mschull
"""
import numpy as np
import math
from astropy.convolution import convolve
from astropy.convolution import Gaussian2DKernel, Box2DKernel
# script imports
# imports
def to_jd(datetime):
"""
Converts a given datetime object to Julian date.
Algorithm is copied from https://en.wikipedia.org/wiki/Julian_day
All variable names are consistent with the notation on the wiki page.
Parameters
----------
    datetime : datetime.datetime
        Datetime object to convert to a Julian date
Returns
-------
jd: float
"""
dt = datetime
a = math.floor((14. - dt.month) / 12.)
y = dt.year + 4800. - a
m = dt.month + 12. * a - 3.
jdn = dt.day + math.floor((153. * m + 2.) / 5.) + 365. * y + math.floor(y / 4.) - math.floor(y / 100.) + math.floor(
y / 400.) - 32045.
jd = jdn + (dt.hour - 12.) / 24. + dt.minute / 1440. + dt.second / 86400. + dt.microsecond / 86400000000.
return jd
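# Quick sanity sketch (illustrative only): 2000-01-01 12:00 UTC is the J2000
# reference epoch, whose Julian date is 2451545.0, and to_jd reproduces it.
def example_to_jd_j2000():
    import datetime as _datetime
    return to_jd(_datetime.datetime(2000, 1, 1, 12))  # expected 2451545.0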
# ;
# ; PROCEDURE: SUNSET_SUNRISE
# ;
# ; CALLED BY: DISALEXI (found at end of file)
# ;
# ; PURPOSE:
# ; Computes solar time variables following Campbell & Norman 1998
# ;
# ;======================================================================================================
# PRO sunset_sunrise, julian, lon, lat, time_t
#
# COMMON com_time, t_rise, t_end, zs
def sunset_sunrise(dt, lon, lat, time_t):
julian = to_jd(dt)
# Sunrise time
julian_ = julian + (time_t / 24.)
j_cen = ((julian_ + 0.5 - 2451545.) / 36525.)
lon_sun = (280.46646 + j_cen * (36000.76983 + j_cen * 0.0003032) % 360.) - 360.
an_sun = 357.52911 + j_cen * (35999.05029 - 0.0001537 * j_cen)
ecc = 0.016708634 - j_cen * (0.000042037 + 0.0000001267 * j_cen)
ob_ecl = 23. + (26. + ((21.448 - j_cen * (46.815 + j_cen * (0.00059 - j_cen * 0.001813)))) / 60.) / 60.
ob_corr = ob_ecl + 0.00256 * np.cos(np.deg2rad(125.04 - 1934.136 * j_cen))
var_y = np.tan(np.deg2rad(ob_corr / 2.)) * np.tan(np.deg2rad(ob_corr / 2.))
eq_t = 4. * np.rad2deg(var_y * np.sin(np.deg2rad(2. * lon_sun)) - 2. * ecc * np.sin(np.deg2rad(an_sun))
+ 4. * ecc * var_y * np.sin(np.deg2rad(an_sun)) * np.cos(
np.deg2rad(2. * lon_sun)) - 0.5 * var_y *
var_y * np.sin(np.deg2rad(4. * lon_sun)) - 1.25 * ecc * ecc * np.sin(np.deg2rad(2 * an_sun)))
sun_eq = np.sin(np.deg2rad(an_sun)) * (1.914602 - j_cen * (0.004817 + 0.000014 * j_cen)) + \
np.sin(np.deg2rad(2. * an_sun)) * (0.019993 - 0.000101 * j_cen) + np.sin(
np.deg2rad(3. * an_sun)) * 0.000289
sun_true = sun_eq + lon_sun
sun_app = sun_true - 0.00569 - 0.00478 * np.sin(np.deg2rad((125.04 - 1934.136 * j_cen)))
d = np.rad2deg((np.arcsin(np.sin(np.deg2rad(ob_corr)) * np.sin(np.deg2rad(sun_app)))))
ha_t = np.rad2deg(np.arccos(
np.cos(np.deg2rad(90.833)) / (np.cos(lat) * np.cos(np.deg2rad(d))) - np.tan(lat) * np.tan(np.deg2rad(d))))
t_noon = (720. - 4. * np.rad2deg(lon) - eq_t) / 1440. * 24.
t_rise = ((t_noon / 24.) - (ha_t * 4. / 1440.)) * 24.
t_end = ((t_noon / 24.) + (ha_t * 4. / 1440.)) * 24.
ts_time = ((time_t / 24. * 1440 + eq_t + 4. * np.rad2deg(lon)) % 1440.)
ts_time[ts_time > 1440.] = ts_time[ts_time > 1440.] - 1440.
w = ts_time / 4. + 180.
w[ts_time / 4. >= 0] = ts_time[ts_time / 4. >= 0.] / 4. - 180.
zs = np.arccos(
(np.sin(lat) * np.sin(np.deg2rad(d))) + (np.cos(lat) * np.cos(np.deg2rad(d)) * np.cos(np.deg2rad(w))))
return t_rise, t_end, zs
# PRO albedo_separation, albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, z, t_air, zs, control
#
# COMMON com_alb, Rs_c, Rs_s, albedo_c, albedo_s, e_atm, rsoilv_itr, fg_itr
#
# ;*******************************************************************************************************************
# ; Compute Solar Components and atmospheric properties (Campbell & Norman 1998)
def albedo_separation(albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, z, t_air, zs, control):
# ; Compute Solar Components and atmospheric properties (Campbell & Norman 1998)
# DAYTIME
# Calculate potential (clear-sky) VIS and NIR solar components
airmas = (np.sqrt(np.cos(zs) ** 2 + .0025) - np.cos(zs)) / .00125 # Correct for curvature of atmos in airmas
zs_temp = zs.copy()
zs_temp[np.rad2deg(zs) >= 89.5] = np.deg2rad(89.5)
ind = np.rad2deg(zs) < 89.5
airmas[ind] = (airmas[ind] - 2.8 / (
90. - np.rad2deg(zs_temp[ind])) ** 2.) # Correct for refraction(good up to 89.5 deg.)
potbm1 = 600. * np.exp(-.160 * airmas)
potvis = (potbm1 + (600. - potbm1) * .4) * np.cos(zs)
potdif = (600. - potbm1) * .4 * np.cos(zs)
uu = 1.0 / np.cos(zs)
uu[uu <= 0.01] = 0.01
axlog = np.log10(uu)
a = 10 ** (-1.195 + .4459 * axlog - .0345 * axlog * axlog)
watabs = 1320. * a
potbm2 = 720. * np.exp(-.05 * airmas) - watabs
evaL = (720. - potbm2 - watabs) * .54 * np.cos(zs)
potnir = evaL + potbm2 * np.cos(zs)
fclear = Rs_1 / (potvis + potnir)
fclear[fclear > 1.] = 1.
fclear[np.cos(zs) <= 0.01] = 1.
fclear[fclear <= 0.01] = 0.01
# Partition SDN into VIS and NIR
fvis = potvis / (potvis + potnir)
fnir = potnir / (potvis + potnir)
# Estimate direct beam and diffuse fraction in VIS and NIR wavebands
fb1 = potbm1 * np.cos(zs) / potvis
fb2 = potbm2 * np.cos(zs) / potnir
ratiox = fclear.copy()
ratiox[fclear > 0.9] = 0.9
dirvis = fb1 * (1. - ((.9 - ratiox) / .7) ** .6667)
ind = dirvis >= fb1
dirvis[ind] = fb1[ind]
ratiox = fclear.copy()
ratiox[fclear > 0.88] = 0.88
dirnir = fb1 * (1. - ((.88 - ratiox) / .68) ** .6667)
ind = dirnir >= fb2
dirnir[ind] = fb1[ind]
ind = np.logical_and((dirvis < 0.01), (dirnir > 0.01))
dirvis[ind] = 0.011
ind = np.logical_and((dirnir < 0.01), (dirvis > 0.01))
dirnir[ind] = 0.011
difvis = 1. - dirvis
difnir = 1. - dirnir
    # Correction for NIGHTTIME
ind = np.cos(zs) <= 0.01
fvis[ind] = 0.5
fnir[ind] = 0.5
difvis[ind] = 1.
difnir[ind] = 1.
dirvis[ind] = 0.
dirnir[ind] = 0.
Rs0 = potvis + potnir
Rs0[ind] = 0.
# apparent emissivity (Sedlar and Hock, 2009: Cryosphere 3:75-84)
e_atm = 1. - (0.2811 * (
        np.exp(-0.0003523 * ((t_air - 273.16) ** 2.))))  # atmospheric emissivity (clear-sky), Idso and Jackson (1969)
fclear[Rs0 <= 50.] = 1.
# **********************************************
# Compute Albedo
ratio_soil = 2.
if control == 1:
rsoilv = np.tile(0.12, np.shape(F))
fg = np.tile(1., np.shape(albedo))
z_inter = 9
# else:
# rsoilv = rsoilv_itr
# fg = fg_itr
# z_inter = 0.
for zzz in range(z_inter + 1): # +1 to do what IDL does
rsoiln = rsoilv * ratio_soil
# Weighted live/dead leaf average properties
ameanv = aleafv * fg + adeadv * (1. - fg)
ameann = aleafn * fg + adeadn * (1. - fg)
ameanl = aleafl * fg + adeadl * (1. - fg)
# DIFFUSE COMPONENT
# *******************************
# canopy reflection (deep canopy)
akd = -0.0683 * np.log(F) + 0.804 # Fit to Fig 15.4 for x=1
rcpyn = (1.0 - np.sqrt(ameann)) / (1.0 + np.sqrt(ameann)) # Eq 15.7
rcpyv = (1.0 - np.sqrt(ameanv)) / (1.0 + np.sqrt(ameanv))
rcpyl = (1.0 - np.sqrt(ameanl)) / (1.0 + np.sqrt(ameanl))
rdcpyn = 2.0 * akd * rcpyn / (akd + 1.0) # Eq 15.8
rdcpyv = 2.0 * akd * rcpyv / (akd + 1.0)
rdcpyl = 2.0 * akd * rcpyl / (akd + 1.0)
# canopy transmission (VIS)
expfac = np.sqrt(ameanv) * akd * F
expfac[expfac < 0.001] = 0.001
xnum = (rdcpyv * rdcpyv - 1.0) * np.exp(-expfac)
xden = (rdcpyv * rsoilv - 1.0) + rdcpyv * (rdcpyv - rsoilv) * np.exp(-2.0 * expfac)
taudv = xnum / xden # Eq 15.11
# canopy transmission (NIR)
expfac = np.sqrt(ameann) * akd * F
expfac[expfac < 0.001] = 0.001
xnum = (rdcpyn * rdcpyn - 1.0) * np.exp(-expfac)
xden = (rdcpyn * rsoiln - 1.0) + rdcpyn * (rdcpyn - rsoiln) * np.exp(-2.0 * expfac)
taudn = xnum / xden # Eq 15.11
# canopy transmission (LW)
taudl = np.exp(-np.sqrt(ameanl) * akd * F)
# diffuse albedo for generic canopy
fact = ((rdcpyn - rsoiln) / (rdcpyn * rsoiln - 1.0)) * np.exp(-2.0 * np.sqrt(ameann) * akd * F) # Eq 15.9
albdn = (rdcpyn + fact) / (1.0 + rdcpyn * fact)
fact = ((rdcpyv - rsoilv) / (rdcpyv * rsoilv - 1.0)) * np.exp(-2.0 * np.sqrt(ameanv) * akd * F) # Eq 15.9
albdv = (rdcpyv + fact) / (1.0 + rdcpyv * fact)
# BEAM COMPONENT
# *******************************
# canopy reflection (deep canopy)
akb = 0.5 / np.cos(zs)
akb[np.cos(zs) <= 0.01] = 0.5
rcpyn = (1.0 - np.sqrt(ameann)) / (1.0 + np.sqrt(ameann)) # Eq 15.7
rcpyv = (1.0 - np.sqrt(ameanv)) / (1.0 + np.sqrt(ameanv))
rbcpyn = 2.0 * akb * rcpyn / (akb + 1.0) # Eq 15.8
rbcpyv = 2.0 * akb * rcpyv / (akb + 1.0)
# beam albedo for generic canopy
fact = ((rbcpyn - rsoiln) / (rbcpyn * rsoiln - 1.0)) * np.exp(-2.0 * np.sqrt(ameann) * akb * F) # Eq 15.9
albbn = (rbcpyn + fact) / (1.0 + rbcpyn * fact)
fact = ((rbcpyv - rsoilv) / (rbcpyv * rsoilv - 1.0)) * np.exp(-2.0 * np.sqrt(ameanv) * akb * F) # Eq 15.9
albbv = (rbcpyv + fact) / (1.0 + rbcpyv * fact)
# weighted albedo (canopy)
albedo_c = fvis * (dirvis * albbv + difvis * albdv) + fnir * (dirnir * albbn + difnir * albdn)
ind = np.cos(zs) <= 0.01
albedo_c[ind] = (fvis[ind] * (difvis[ind] * albdv[ind]) + fnir[ind] * (difnir[ind] * albdn[ind]))
albedo_s = fvis * rsoilv + fnir * rsoiln
albedo_avg = (fc * albedo_c) + ((1 - fc) * albedo_s)
diff = albedo_avg - albedo
ind = np.logical_and((fc < 0.75), (diff <= -0.01))
rsoilv[ind] = rsoilv[ind] + 0.01
ind = np.logical_and((fc < 0.75), (diff > 0.01))
rsoilv[ind] = rsoilv[ind] - 0.01
ind = np.logical_and((fc >= 0.75), (diff <= -0.01))
fg[ind] = fg[ind] - 0.05
ind = np.logical_and((fc >= 0.75), (diff > 0.01))
fg[ind] = fg[ind] + 0.05
fg[fg > 1.] = 1.
fg[fg < 0.01] = 0.01
if control == 1:
fg_itr = fg
rsoilv_itr = rsoilv
ind = abs(diff) > 0.05
albedo_c[ind] = albedo[ind]
albedo_s[ind] = albedo[ind] # if a solution is not reached, alb_c=alb_s=alb
# Direct beam+scattered canopy transmission coeff (visible)
expfac = np.sqrt(ameanv) * akb * F
xnum = (rbcpyv * rbcpyv - 1.0) * np.exp(-expfac)
xden = (rbcpyv * rsoilv - 1.0) + rbcpyv * (rbcpyv - rsoilv) * np.exp(-2.0 * expfac)
taubtv = xnum / xden # Eq 15.11
# Direct beam+scattered canopy transmission coeff (NIR)
expfac = np.sqrt(ameann) * akb * F
xnum = (rbcpyn * rbcpyn - 1.0) * np.exp(-expfac)
xden = (rbcpyn * rsoiln - 1.0) + rbcpyn * (rbcpyn - rsoiln) * np.exp(-2.0 * expfac)
taubtn = xnum / xden # Eq 15.11
    # shortwave radiation components
tausolar = fvis * (difvis * taudv + dirvis * taubtv) + fnir * (difnir * taudn + dirnir * taubtn)
Rs_c = Rs_1 * (1. - tausolar)
Rs_s = Rs_1 * tausolar
return Rs_c, Rs_s, albedo_c, albedo_s, e_atm, rsoilv_itr, fg_itr
def compute_G0(Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s):
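    # Soil heat flux as a fraction of soil net radiation: the weight w blends
    # dry-soil and wet-soil values of the amplitude (c_g) and period (t_g)
    # based on the soil evaporative fraction EF_s, and the cosine term phases
    # G0 relative to solar noon (t_rise/t_end in hours, 10800 s = 3 h offset).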
w = 1 / (1 + (EF_s / 0.5) ** 8.)
c_g = (w * 0.35) + (
        (1 - w) * 0.31)  # maximum fraction of Rn,s that becomes G0 (0.35 for dry soil and 0.31 for wet soil)
t_g = (w * 100000.) + ((1 - w) * 74000.)
tnoon = 0.5 * (t_rise + t_end)
t_g0 = (time - tnoon) * 3600.
G0 = c_g * np.cos(2 * np.pi * (t_g0 + 10800.) / t_g) * Rn_s
ind = np.logical_and(ndvi <= 0, albedo <= 0.05)
G0[ind] = Rn[ind] * 0.5
return G0
# PRO compute_resistence, U, Ts, Tc, hc, F, d0, z0m, z0h, z_u, z_T, xl, leaf, leafs, leafc, fm, fh, fm_h
#
# COMMON com_res, r_ah, r_s, r_x, u_attr
def compute_resistence(U, Ts, Tc, hc, F, d0, z0m, z0h, z_u, z_T, xl, leaf, leafs, leafc, fm, fh, fm_h):
c_a = 0.004 # Free convective velocity constant for r_s modelling
c_b = 0.012 # Empirical constant for r_s modelling
c_c = 0.0025 # Empirical constant for r_s modelling (new formulation Kustas and Norman, 1999)
    C = 175.  # Parameter for canopy boundary-layer resistance (C=90 Grace '81, C=175 Chehbouni 2001, 144 Li '98)
# Computation of friction velocity and aerodynamic resistance
u_attr = 0.41 * U / ((np.log((z_u - d0) / z0m)) - fm)
u_attr[u_attr == 0] = 10.
u_attr[u_attr < 0] = 0.01
r_ah = ((np.log((z_T - d0) / z0h)) - fh) / u_attr / 0.41
r_ah[r_ah == 0] = 500.
r_ah[r_ah <= 1.] = 1.
# Computation of the resistance of the air between soil and canopy space
Uc = u_attr / 0.41 * ((np.log((hc - d0) / z0m)) - fm_h)
Uc[Uc <= 0] = 0.1
Us = Uc * np.exp(-leaf * (1. - (0.05 / hc)))
r_ss = 1. / (c_a + (c_b * (Uc * np.exp(-leafs * (1. - (0.05 / hc))))))
r_s1 = 1. / ((((abs(Ts - Tc)) ** (1. / 3.)) * c_c) + (c_b * Us))
r_s2 = 1. / (c_a + (c_b * Us))
r_s = (((r_ss - 1.) / 0.09 * (F - 0.01)) + 1.)
    r_s[F > 0.1] = r_s1[F > 0.1]  # linear function between 0 (bare soil) and the value at F=0.1
r_s[abs(Ts - Tc) < 1.] = r_s2[abs(Ts - Tc) < 1.]
r_s[F > 3.] = r_s2[F > 3.]
# Computation of the canopy boundary layer resistance
Ud = Uc * np.exp(-leafc * (1. - ((d0 + z0m) / hc)))
Ud[Ud <= 0.] = 100.
r_x = C / F * ((xl / Ud) ** 0.5)
r_x[Ud == 100.] = 0.1
return r_ah, r_s, r_x, u_attr
# PRO compute_Rn, albedo_c, albedo_s, t_air, Tc, Ts, e_atm, Rs_c, Rs_s, F
#
# COMMON com_Rn, Rn_s, Rn_c, Rn
def compute_Rn(albedo_c, albedo_s, t_air, Tc, Ts, e_atm, Rs_c, Rs_s, F):
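    # Net radiation is partitioned between canopy and soil assuming exponential
    # extinction of longwave radiation through the canopy (coefficient kL),
    # with fixed soil/canopy emissivities and Stefan-Boltzmann emission from
    # Tc, Ts and the air temperature.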
kL = 0.95 # long-wave extinction coefficient [-]
eps_s = 0.94 # Soil Emissivity [-]
eps_c = 0.99 # Canopy emissivity [-]
Lc = eps_c * 0.0000000567 * (Tc ** 4.)
Ls = eps_s * 0.0000000567 * (Ts ** 4.)
Rle = e_atm * 0.0000000567 * (t_air ** 4.)
Rn_c = ((1. - albedo_c) * Rs_c) + ((1. - np.exp(-kL * F)) * (Rle + Ls - 2. * Lc))
Rn_s = ((1. - albedo_s) * Rs_s) + ((np.exp(-kL * F)) * Rle) + ((1. - np.exp(-kL * F)) * Lc) - Ls
Rn = Rn_s + Rn_c
return Rn_s, Rn_c, Rn
# PRO temp_separation, H_c, fc, t_air, t0, r_ah, r_x, r_s, r_air
#
# COMMON com_sep, Tc, Ts, Tac
def temp_separation(H_c, fc, t_air, t0, r_ah, r_x, r_s, r_air, cp):
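    # Linearized canopy-temperature solution (Tc_lin plus a correction
    # delta_Tc from the fourth-power energy balance), followed by the soil
    # temperature Ts consistent with the composite radiometric temperature t0,
    # and the in-canopy air temperature Tac from the series resistance
    # network (r_ah, r_s, r_x).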
Tc_lin = ((t_air / r_ah) + (t0 / r_s / (1. - fc)) + (
H_c * r_x / r_air / cp * ((1. / r_ah) + (1. / r_s) + (1. / r_x)))) / (
(1. / r_ah) + (1. / r_s) + (fc / r_s / (1. - fc)))
Td = (Tc_lin * (1 + (r_s / r_ah))) - (H_c * r_x / r_air / cp * (1. + (r_s / r_x) + (r_s / r_ah))) - (
t_air * r_s / r_ah)
delta_Tc = ((t0 ** 4.) - (fc * (Tc_lin ** 4.)) - ((1. - fc) * (Td ** 4.))) / (
(4. * (1. - fc) * (Td ** 3.) * (1. + (r_s / r_ah))) + (4. * fc * (Tc_lin ** 3.)))
Tc = (Tc_lin + delta_Tc)
Tc[fc < 0.10] = t0[fc < 0.10]
Tc[fc > 0.90] = t0[fc > 0.90]
# ======get Ts==================================================================
Delta = (t0 ** 4.) - (fc * (Tc ** 4.))
Delta[Delta <= 0.] = 10.
Ts = (Delta / (1 - fc)) ** 0.25
ind = ((t0 ** 4) - (fc * Tc ** 4.)) <= 0.
Ts[ind] = (t0[ind] - (fc[ind] * Tc[ind])) / (1 - fc[ind])
Ts[fc < 0.1] = t0[fc < 0.1]
Ts[fc > 0.9] = t0[fc > 0.9]
ind = (Tc <= (t_air - 10.))
Tc[ind] = (t_air[ind] - 10.)
ind = (Tc >= t_air + 50.)
Tc[ind] = (t_air[ind] + 50.)
ind = (Ts <= (t_air - 10.))
Ts[ind] = (t_air[ind] - 10.)
ind = (Ts >= t_air + 50.)
Ts[ind] = (t_air[ind] + 50.)
Tac = ((((t_air) / r_ah) + ((Ts) / r_s) + ((Tc) / r_x)) / ((1 / r_ah) + (1 / r_s) + (1 / r_x)))
return Tc, Ts, Tac
# PRO compute_stability, H, t0, r_air, u_attr, z_u, z_T, hc, d0, z0m, z0h
#
# COMMON com_stab, fm, fh, fm_h
def compute_stability(H, t0, r_air, cp, u_attr, z_u, z_T, hc, d0, z0m, z0h):
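    # Monin-Obukhov stability corrections: L_ob is the Obukhov length and
    # fm, fm_h, fh are the unstable-case stability functions for momentum
    # (at z_u and hc) and heat (at z_T); they are left at zero outside
    # -100 < L_ob < 100.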
t0[t0 == 273.16] = 373.16
L_ob = -(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)
L_ob[L_ob >= 0.] = -99.
mm = ((1. - (16. * (z_u - d0) / L_ob)) ** 0.25)
mm_h = ((1. - (16. * (hc - d0) / L_ob)) ** 0.25)
mh = ((1. - (16. * (z_T - d0) / L_ob)) ** 0.25)
ind = L_ob == -99.
mm[ind] = 0.
mm_h[ind] = 0.
mh[ind] = 0.
fm = np.zeros(mh.shape)
ind = np.logical_and((L_ob < 100.), (L_ob > (-100.)))
fm[ind] = ((2.0 * np.log((1.0 + mm[ind]) / 2.0)) + (np.log((1.0 + (mm[ind] ** 2.)) / 2.0)) - (
2.0 * np.arctan(mm[ind])) + (np.pi / 2.))
fm_h = np.zeros(mh.shape)
fm_h[ind] = ((2.0 * np.log((1.0 + mm_h[ind]) / 2.0)) + (np.log((1.0 + (mm_h[ind] ** 2.)) / 2.0)) - (
2.0 * np.arctan(mm_h[ind])) + (np.pi / 2.))
fh = np.zeros(mh.shape)
fh[ind] = ((2.0 * np.log((1.0 + (mh[ind] ** 2.)) / 2.0)))
ind = (fm == (np.log((z_u - d0) / z0m)))
fm[ind] = fm[ind] + 1.
ind = (fm_h == (np.log((hc - d0) / z0m)))
fm_h[ind] = fm_h[ind] + 1.
return fm, fh, fm_h
def smooth(signal, owidth, edge_truncate=False):
"""Replicates the IDL ``SMOOTH()`` function.
Parameters
----------
signal : array-like
The array to be smoothed.
owidth : :class:`int` or array-like
Width of the smoothing window. Can be a scalar or an array with
length equal to the number of dimensions of `signal`.
edge_truncate : :class:`bool`, optional
Set `edge_truncate` to ``True`` to apply smoothing to all points.
Points near the edge are normally excluded from smoothing.
Returns
-------
array-like
        A smoothed array with the same dimensions and type as `signal`.
Notes
-----
References
----------
http://www.exelisvis.com/docs/SMOOTH.html
Examples
--------
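    A minimal sketch (assuming a 1-D float ndarray and a scalar width): with
    ``edge_truncate=False`` the edge points are left untouched, so
    ``smooth(np.array([1., 2., 6., 2., 1.]), 3)`` returns approximately
    ``[1., 3., 3.33, 3., 1.]``.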
"""
if owidth % 2 == 0:
width = owidth + 1
else:
width = owidth
if width < 3:
return signal
n = signal.size
istart = int((width - 1) / 2)
iend = n - int((width + 1) / 2)
w2 = int(width / 2)
s = signal.copy()
for i in range(n):
if i < istart:
if edge_truncate:
s[i] = (np.nansum(signal[0:istart + i + 1]) +
(istart - i) * signal[0]) / float(width)
elif i > iend:
if edge_truncate:
s[i] = (np.nansum(signal[i - istart:n]) +
(i - iend) * signal[n - 1]) / float(width)
else:
s[i] = np.nansum(signal[i - w2:i + w2 + 1]) / float(width)
return s
def Smooth(v1, w, nanopt):
# v1 is the input 2D numpy array.
# w is the width of the square window along one dimension
# nanopt can be replace or propagate
'''
v1 = np.array(
[[3.33692829e-02, 6.79152655e-02, 9.66020487e-01, 8.56235492e-01],
[3.04355923e-01, np.nan , 4.86013025e-01, 1.00000000e+02],
[9.40659566e-01, 5.23314093e-01, np.nan , 9.09669768e-01],
[1.85165123e-02, 4.44609040e-02, 5.10472165e-02, np.nan ]])
w = 2
'''
# make a copy of the array for the output:
vout = np.copy(v1)
# If w is even, add one
if w % 2 == 0:
w = w + 1
# get the size of each dim of the input:
r, c = v1.shape
# Assume that w, the width of the window is always square.
    startrc = (w - 1) // 2
    stopr = r - ((w + 1) // 2) + 1
    stopc = c - ((w + 1) // 2) + 1
# For all pixels within the border defined by the box size, calculate the average in the window.
# There are two options:
# Ignore NaNs and replace the value where possible.
# Propagate the NaNs
for col in range(startrc, stopc):
# Calculate the window start and stop columns
        startwc = col - (w // 2)
        stopwc = col + (w // 2) + 1
for row in range(startrc, stopr):
# Calculate the window start and stop rows
            startwr = row - (w // 2)
            stopwr = row + (w // 2) + 1
# Extract the window
window = v1[startwr:stopwr, startwc:stopwc]
if nanopt == 'replace':
# If we're replacing Nans, then select only the finite elements
window = window[np.isfinite(window)]
# Calculate the mean of the window
vout[row, col] = np.mean(window)
return vout
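# Example (a minimal sketch using the sample array from the docstring above):
# Smooth(v1, 2, 'replace') averages each interior pixel over a 3x3 window
# while skipping NaNs; border pixels keep their original values.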
# FUNCTION interp_ta, Ta, bad, rid
# ;mask_full = where(Ta ne bad)
# ;t_air = Ta[mask_full]
# hold=where(Ta EQ bad, vct)
# if vct GT 0 then Ta[where(Ta EQ bad)]=!values.f_nan
# t_air=Ta
# ta_m = mean(t_air,/nan)
# ta_v = sqrt(variance(t_air,/nan))
#
# mask_bad = where(abs(Ta-ta_m) gt 10*ta_v, c_bad)
# Ta_temp = Ta
# IF c_bad ne 0 THEN BEGIN
# Ta_temp[mask_bad] = !Values.F_NAN
# ENDIF
#
# rid2=sqrt(rid)
# Ta_smooth = SMOOTH(Ta_temp, rid2/1., /EDGE_TRUNCATE, MISSING=ta_m, /NAN)
#
# RETURN, Ta_smooth
# END
def interp_ta(Ta, coarseRes, fineRes):
    coarse2fineRatio = coarseRes ** 2 / fineRes ** 2
    rid2 = int(np.sqrt(coarse2fineRatio))
ta_m = np.nanmean(Ta)
ta_v = np.nanstd(Ta)
mask_bad = (abs(Ta - ta_m) > 10. * ta_v)
Ta[np.where(mask_bad)] = np.nan
# =====using scipy==========
# local_mean = ndimage.uniform_filter(Ta, size=rid2,mode='nearest')
# return smooth(Ta, rid2,True)
# return Smooth(Ta, rid2, 'replace')
    # =====using astropy==============
    # Smooth with a square box kernel whose width (in pixels) is the
    # coarse-to-fine resolution ratio (a Gaussian2DKernel could be used instead)
    box_2D_kernel = Box2DKernel(rid2)
    local_mean = convolve(Ta, box_2D_kernel)
return local_mean
|
bucricket/projectMAS
|
pydisalexi/TSEB_utils_usda.py
|
Python
|
bsd-3-clause
| 21,861
|
[
"Gaussian"
] |
e78d7ba57685d7baf7838997853c0a2afa43901a8c04f72055e8dd737b9ee18d
|
# init.py ---
#
# Filename: init.py
# Description:
# Author:
# Maintainer:
# Created: Wed May 23 21:24:11 2012 (+0530)
# Version:
# Last-Updated: Sat Aug 6 14:35:20 2016 (-0400)
# By: subha
# Update #: 75
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Initialize model prototypes
#
#
# Change log:
#
#
#
#
# Code:
import moose
import config
import nachans
import kchans
import archan
import cachans
import capool
_channels = {}
def init_chanlib():
"""Return a dict of channel name, channel prototype pairs. If the
channel prototypes have not been initialized, this functions
initializes the same."""
global _channels
if _channels:
return _channels
if not moose.exists(config.modelSettings.libpath):
moose.Neutral(config.modelSettings.libpath)
_channels.update(nachans.initNaChannelPrototypes())
_channels.update(kchans.initKChannelPrototypes())
_channels.update(archan.initARChannelPrototypes())
_channels.update(cachans.initCaChannelPrototypes())
_channels.update(capool.initCaPoolPrototypes())
_channels['spike'] = moose.SpikeGen('{}/spike'.format(config.modelSettings.libpath))
return _channels
init_chanlib()
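# Example usage (a minimal sketch): the prototypes live under
# config.modelSettings.libpath, e.g.
#   spike_proto = init_chanlib()['spike']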
#
# init.py ends here
|
BhallaLab/moose-examples
|
traub_2005/py/channelinit.py
|
Python
|
gpl-2.0
| 1,279
|
[
"MOOSE"
] |
ed0fee15d521b12fb1823bea11ee5712473a054e29dbb96e26ab0f1ad034b46b
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
import django.contrib.auth.views as djauth
# For adding explicit grouping resource endpoints in API documentation.
from rest_framework_swagger.urlparser import UrlParser
from catmaid.control import (authentication, user, log, message, client, common,
project, stack, stackgroup, tile, tracing, stats, neuron_annotations as
annotations, textlabel, label, link, connector, neuron, node, treenode,
suppressed_virtual_treenode, skeleton, skeletonexport, treenodeexport,
cropping, data_view, ontology, classification, notifications, roi,
clustering, volume, flytem, dvid, useranalytics, user_evaluation,
search, graphexport, transaction, graph2, circles, analytics, review,
wiringdiagram, object, treenodetable)
from catmaid.views import CatmaidView
from catmaid.history import record_request_action as record_view
# A regular expression matching floating point and integer numbers
num = r'[-+]?[0-9]*\.?[0-9]+'
integer = r'[-+]?[0-9]+'
# A regular expression matching lists of integers with comma as delimiter
intlist = r'[0-9]+(,[0-9]+)*'
# A regular expression matching comma-separated lists of words (no commas within words)
wordlist = r'\w+(,\w+)*'
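# These fragments are interpolated into the url() patterns below (via "%"
# formatting or str.format), e.g. in the cropping, ontology and
# classification routes.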
app_name = 'catmaid'
# Add the main index.html page at the root:
urlpatterns = [
url(r'^$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/index.html')), name="home"),
url(r'^version$', common.get_catmaid_version)
]
# Authentication and permissions
urlpatterns += [
url(r'^accounts/login$', authentication.login_user),
url(r'^accounts/logout$', authentication.logout_user),
url(r'^accounts/(?P<project_id>\d+)/all-usernames$', authentication.all_usernames),
url(r'^permissions$', authentication.user_project_permissions),
url(r'^classinstance/(?P<ci_id>\d+)/permissions$', authentication.get_object_permissions),
url(r'^register$', authentication.register),
]
# Users
urlpatterns += [
url(r'^user-list$', user.user_list),
url(r'^user-table-list$', user.user_list_datatable),
url(r'^user-profile/update$', user.update_user_profile),
url(r'^user/password_change/$', user.change_password, {'post_change_redirect': '/'}),
]
# Log
urlpatterns += [
url(r'^(?P<project_id>\d+)/logs/list$', log.list_logs),
url(r'^log/(?P<level>(info|error|debug))$', log.log_frontent_event),
]
# Transaction history
UrlParser.explicit_root_paths |= set(['{project_id}/transactions'])
urlpatterns += [
url(r'^(?P<project_id>\d+)/transactions/$', transaction.transaction_collection),
url(r'^(?P<project_id>\d+)/transactions/location$', transaction.get_location),
]
# Messages
urlpatterns += [
url(r'^messages/list$', message.list_messages),
url(r'^messages/mark_read$', message.read_message),
url(r'^messages/latestunreaddate', message.get_latest_unread_date),
]
# CATMAID client datastore and data access
urlpatterns += [
url(r'^client/datastores/$', client.ClientDatastoreList.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)$', client.ClientDatastoreDetail.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)/$', client.ClientDataList.as_view()),
]
# General project model access
urlpatterns += [
url(r'^projects/$', project.projects),
url(r'^projects/export$', project.export_projects),
]
# General stack model access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stacks$', stack.stacks),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/info$', stack.stack_info),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/models$', stack.stack_models),
]
# General stack group access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stackgroup/(?P<stackgroup_id>\d+)/info$', stackgroup.get_stackgroup_info),
]
# Tile access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tile$', tile.get_tile),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/put_tile$', tile.put_tile),
]
# Tracing general
urlpatterns += [
url(r'^(?P<project_id>\d+)/tracing/setup/rebuild$', tracing.rebuild_tracing_setup_view),
url(r'^(?P<project_id>\d+)/tracing/setup/test$', tracing.check_tracing_setup_view),
]
# Statistics
UrlParser.explicit_root_paths |= set(['{project_id}/stats'])
urlpatterns += [
url(r'^(?P<project_id>\d+)/stats/nodecount$', stats.stats_nodecount),
url(r'^(?P<project_id>\d+)/stats/editor$', stats.stats_editor),
url(r'^(?P<project_id>\d+)/stats/summary$', stats.stats_summary),
url(r'^(?P<project_id>\d+)/stats/history$', stats.stats_history),
url(r'^(?P<project_id>\d+)/stats/user-history$', stats.stats_user_history),
url(r'^(?P<project_id>\d+)/stats/user-activity$', stats.stats_user_activity),
]
# Annotations
urlpatterns += [
url(r'^(?P<project_id>\d+)/annotations/$', annotations.list_annotations),
url(r'^(?P<project_id>\d+)/annotations/query$', annotations.annotations_for_entities),
url(r'^(?P<project_id>\d+)/annotations/forskeletons$', annotations.annotations_for_skeletons),
url(r'^(?P<project_id>\d+)/annotations/table-list$', annotations.list_annotations_datatable),
url(r'^(?P<project_id>\d+)/annotations/add$', record_view("annotations.add")(annotations.annotate_entities)),
url(r'^(?P<project_id>\d+)/annotations/remove$', record_view("annotations.remove")(annotations.remove_annotations)),
url(r'^(?P<project_id>\d+)/annotations/(?P<annotation_id>\d+)/remove$', record_view("annotations.remove")(annotations.remove_annotation)),
url(r'^(?P<project_id>\d+)/annotations/query-targets$', annotations.query_annotated_classinstances),
]
# Text labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/textlabel/create$', record_view("textlabels.create")(textlabel.create_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/delete$', record_view("textlabels.delete")(textlabel.delete_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/update$', record_view("textlabels.update")(textlabel.update_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/all', textlabel.textlabels),
]
# Treenode labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/labels/$', label.labels_all),
url(r'^(?P<project_id>\d+)/labels-for-nodes$', label.labels_for_nodes),
url(r'^(?P<project_id>\d+)/labels/(?P<node_type>(treenode|location|connector))/(?P<node_id>\d+)/$', label.labels_for_node),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/update$', record_view("labels.update")(label.label_update)),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/remove$', record_view("labels.remove")(label.remove_label_link)),
url(r'^(?P<project_id>\d+)/label/remove$', record_view("labels.remove_unused")(label.label_remove)),
]
# Links
urlpatterns += [
url(r'^(?P<project_id>\d+)/link/create$', record_view("links.create")(link.create_link)),
url(r'^(?P<project_id>\d+)/link/delete$', record_view("links.remove")(link.delete_link)),
]
# Connector access
urlpatterns += [
url(r'^(?P<project_id>\d+)/connector/create$', record_view("connectors.create")(connector.create_connector)),
url(r'^(?P<project_id>\d+)/connector/delete$', record_view("connectors.remove")(connector.delete_connector)),
url(r'^(?P<project_id>\d+)/connector/table/list$', connector.list_connector),
url(r'^(?P<project_id>\d+)/connector/list/graphedge$', connector.graphedge_list),
url(r'^(?P<project_id>\d+)/connector/list/one_to_many$', connector.one_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/many_to_many$', connector.many_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/completed$', connector.list_completed),
url(r'^(?P<project_id>\d+)/connector/skeletons$', connector.connector_skeletons),
url(r'^(?P<project_id>\d+)/connector/edgetimes$', connector.connector_associated_edgetimes),
url(r'^(?P<project_id>\d+)/connector/info$', connector.connectors_info),
url(r'^(?P<project_id>\d+)/connector/user-info$', connector.connector_user_info),
url(r'^(?P<project_id>\d+)/connectors/(?P<connector_id>\d+)/$',
connector.connector_detail),
]
# Neuron access
UrlParser.explicit_root_paths |= set(['{project_id}/neurons'])
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/get-all-skeletons$', neuron.get_all_skeletons_of_neuron),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/give-to-user$', record_view("neurons.give_to_user")(neuron.give_neuron_to_other_user)),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/delete$', record_view("neurons.remove")(neuron.delete_neuron)),
url(r'^(?P<project_id>\d+)/neurons/(?P<neuron_id>\d+)/rename$', record_view("neurons.rename")(neuron.rename_neuron)),
url(r'^(?P<project_id>\d+)/neurons/from-models$', neuron.get_neuron_ids_from_models),
]
# Node access
UrlParser.explicit_root_paths |= set(['{project_id}/nodes'])
urlpatterns += [
url(r'^(?P<project_id>\d+)/node/(?P<node_id>\d+)/reviewed$', record_view("nodes.add_or_update_review")(node.update_location_reviewer)),
url(r'^(?P<project_id>\d+)/node/most_recent$', node.most_recent_treenode),
url(r'^(?P<project_id>\d+)/node/nearest$', node.node_nearest),
url(r'^(?P<project_id>\d+)/node/update$', record_view("nodes.update_location")(node.node_update)),
url(r'^(?P<project_id>\d+)/node/list$', node.node_list_tuples),
url(r'^(?P<project_id>\d+)/node/get_location$', node.get_location),
url(r'^(?P<project_id>\d+)/node/user-info$', node.user_info),
url(r'^(?P<project_id>\d+)/nodes/find-labels$', node.find_labels),
]
# Treenode access
UrlParser.explicit_root_paths |= set(['{project_id}/treenodes'])
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenode/create$', record_view("treenodes.create")(treenode.create_treenode)),
url(r'^(?P<project_id>\d+)/treenode/insert$', record_view("treenodes.insert")(treenode.insert_treenode)),
url(r'^(?P<project_id>\d+)/treenode/delete$', record_view("treenodes.remove")(treenode.delete_treenode)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/info$', treenode.treenode_info),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/children$', treenode.find_children),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/confidence$', record_view("treenodes.update_confidence")(treenode.update_confidence)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/parent$', record_view("treenodes.update_parent")(treenode.update_parent)),
url(r'^(?P<project_id>\d+)/treenode/(?P<treenode_id>\d+)/radius$', record_view("treenodes.update_radius")(treenode.update_radius)),
url(r'^(?P<project_id>\d+)/treenodes/radius$', record_view("treenodes.update_radius")(treenode.update_radii)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/previous-branch-or-root$', treenode.find_previous_branchnode_or_root),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/next-branch-or-end$', treenode.find_next_branchnode_or_end),
]
# Suppressed virtual treenode access
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/$',
record_view("treenodes.suppress_virtual_node", "POST")(suppressed_virtual_treenode.SuppressedVirtualTreenodeList.as_view())),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/(?P<suppressed_id>\d+)$',
record_view("treenodes.unsuppress_virtual_node", "DELETE")(suppressed_virtual_treenode.SuppressedVirtualTreenodeDetail.as_view())),
]
# General skeleton access
urlpatterns += [
url(r'^(?P<project_id>\d+)/skeletons/$', skeleton.list_skeletons),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuronname$', skeleton.neuronname),
url(r'^(?P<project_id>\d+)/skeleton/neuronnames$', skeleton.neuronnames),
url(r'^(?P<project_id>\d+)/skeleton/node/(?P<treenode_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/review/reset-own$', record_view("skeletons.reset_own_reviews")(skeleton.reset_own_reviewer_ids)),
url(r'^(?P<project_id>\d+)/skeletons/connectivity$', skeleton.skeleton_info_raw),
url(r'^(?P<project_id>\d+)/skeleton/connectivity_matrix$', skeleton.connectivity_matrix),
url(r'^(?P<project_id>\d+)/skeletons/review-status$', skeleton.review_status),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/statistics$', skeleton.skeleton_statistics),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/contributor_statistics$', skeleton.contributor_statistics),
url(r'^(?P<project_id>\d+)/skeleton/contributor_statistics_multiple$', skeleton.contributor_statistics_multiple),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/find-labels$', skeleton.find_labels),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/open-leaves$', skeleton.open_leaves),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/root$', skeleton.root_for_skeleton),
url(r'^(?P<project_id>\d+)/skeleton/split$', record_view("skeletons.split")(skeleton.split_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/ancestry$', skeleton.skeleton_ancestry),
url(r'^(?P<project_id>\d+)/skeleton/join$', record_view("skeletons.merge")(skeleton.join_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/reroot$', record_view("skeletons.reroot")(skeleton.reroot_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/permissions$', skeleton.get_skeleton_permissions),
url(r'^(?P<project_id>\d+)/skeletons/import$', record_view("skeletons.import")(skeleton.import_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/annotationlist$', skeleton.annotation_list),
url(r'^(?P<project_id>\d+)/skeletons/within-spatial-distance$', skeleton.within_spatial_distance),
url(r'^(?P<project_id>\d+)/skeletongroup/adjacency_matrix$', skeleton.adjacency_matrix),
url(r'^(?P<project_id>\d+)/skeletongroup/skeletonlist_subgraph', skeleton.skeletonlist_subgraph),
url(r'^(?P<project_id>\d+)/skeletongroup/all_shared_connectors', skeleton.all_shared_connectors),
]
# Skeleton export
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuroml/neuroml_level3_v181$', skeletonexport.export_neuroml_level3_v181),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/swc$', skeletonexport.skeleton_swc),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuroml$', skeletonexport.skeletons_neuroml),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/json$', skeletonexport.skeleton_with_metadata),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/compact-json$', skeletonexport.skeleton_for_3d_viewer),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-skeleton$', skeletonexport.compact_skeleton),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor$', skeletonexport.compact_arbor),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor-with-minutes$', skeletonexport.compact_arbor_with_minutes),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/review$', skeletonexport.export_review_skeleton),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/reviewed-nodes$', skeletonexport.export_skeleton_reviews),
url(r'^(?P<project_id>\d+)/skeletons/measure$', skeletonexport.measure_skeletons),
url(r'^(?P<project_id>\d+)/skeleton/connectors-by-partner$', skeletonexport.skeleton_connectors_by_partner),
url(r'^(?P<project_id>\d+)/skeletons/partners-by-connector$', skeletonexport.partners_by_connector),
]
# Treenode and Connector image stack archive export
urlpatterns += [
url(r'^(?P<project_id>\d+)/connectorarchive/export$', treenodeexport.export_connectors),
url(r'^(?P<project_id>\d+)/treenodearchive/export$', treenodeexport.export_treenodes),
]
# Cropping
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_ids>%s)/crop/(?P<x_min>%s),(?P<x_max>%s)/(?P<y_min>%s),(?P<y_max>%s)/(?P<z_min>%s),(?P<z_max>%s)/(?P<zoom_level>\d+)/(?P<single_channel>[0|1])/$' % (intlist, num, num, num, num, num, num), cropping.crop),
url(r'^crop/download/(?P<file_path>.*)/$', cropping.download_crop)
]
# Tagging
urlpatterns += [
url(r'^(?P<project_id>\d+)/tags/list$', project.list_project_tags),
url(r'^(?P<project_id>\d+)/tags/clear$', record_view("projects.clear_tags")(project.update_project_tags)),
url(r'^(?P<project_id>\d+)/tags/(?P<tags>.*)/update$', record_view("projects.update_tags")(project.update_project_tags)),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/list$', stack.list_stack_tags),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/clear$', record_view("stacks.clear_tags")(stack.update_stack_tags)),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/(?P<tags>.*)/update$', record_view("stacks.update_tags")(stack.update_stack_tags)),
]
# Data views
urlpatterns += [
url(r'^dataviews/list$', data_view.get_available_data_views, name='list_dataviews'),
url(r'^dataviews/default$', data_view.get_default_properties, name='default_dataview'),
url(r'^dataviews/show/(?P<data_view_id>\d+)$', data_view.get_data_view, name='show_dataview'),
url(r'^dataviews/show/default$', data_view.get_default_data_view, name='show_default_dataview'),
url(r'^dataviews/type/comment$', data_view.get_data_view_type_comment, name='get_dataview_type_comment'),
url(r'^dataviews/type/(?P<data_view_id>\d+)$', data_view.get_data_view_type, name='get_dataview_type'),
]
# Ontologies
urlpatterns += [
url(r'^ontology/knownroots$', ontology.get_known_ontology_roots),
url(r'^(?P<project_id>%s)/ontology/list$' % (integer), ontology.list_ontology),
url(r'^(?P<project_id>%s)/ontology/relations$' % (integer), ontology.get_available_relations),
url(r'^(?P<project_id>%s)/ontology/relations/add$' % (integer), record_view("ontologies.add_relation")(ontology.add_relation_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/rename$' % (integer), record_view("ontologies.rename_relation")(ontology.rename_relation)),
url(r'^(?P<project_id>%s)/ontology/relations/remove$' % (integer), record_view("ontologies.remove_relation")(ontology.remove_relation_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/removeall$' % (integer), record_view("ontologies.remove_all_relations")(ontology.remove_all_relations_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/list$' % (integer), ontology.list_available_relations),
url(r'^(?P<project_id>%s)/ontology/classes$' % (integer), ontology.get_available_classes),
url(r'^(?P<project_id>%s)/ontology/classes/add$' % (integer), record_view("ontologies.add_class")(ontology.add_class_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/rename$' % (integer), record_view("ontologies.rename_class")(ontology.rename_class)),
url(r'^(?P<project_id>%s)/ontology/classes/remove$' % (integer), record_view("ontologies.remove_class")(ontology.remove_class_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/removeall$' % (integer), record_view("ontologies.remove_all_classes")(ontology.remove_all_classes_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/list$' % (integer), ontology.list_available_classes),
url(r'^(?P<project_id>%s)/ontology/links/add$' % (integer), record_view("ontologies.add_link")(ontology.add_link_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/remove$' % (integer), record_view("ontologies.remove_link")(ontology.remove_link_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeselected$' % (integer), record_view("ontologies.remove_link")(ontology.remove_selected_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeall$' % (integer), record_view("ontologies.remove_all_links")(ontology.remove_all_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/restrictions/add$' % (integer), record_view("ontologies.add_restriction")(ontology.add_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/remove$' % (integer), record_view("ontologies.remove_restriction")(ontology.remove_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/(?P<restriction>[^/]*)/types$' % (integer), ontology.get_restriction_types),
]
# Classification
urlpatterns += [
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/number$'.format(integer),
classification.get_classification_number),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/show$'.format(integer),
classification.show_classification_editor),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/show/(?P<link_id>\d+)$'.format(integer),
classification.show_classification_editor),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/select$'.format(integer),
classification.select_classification_graph, name='select_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/setup/test$'.format(integer),
classification.check_classification_setup_view, name='test_classification_setup'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/setup/rebuild$'.format(integer),
record_view("classifications.rebuild_env")(classification.rebuild_classification_setup_view), name='rebuild_classification_setup'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/new$'.format(integer),
record_view("classifications.add_graph")(classification.add_classification_graph), name='add_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/list$'.format(integer),
classification.list_classification_graph, name='list_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/list/(?P<link_id>\d+)$'.format(integer),
classification.list_classification_graph, name='list_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/(?P<link_id>\d+)/remove$'.format(integer),
record_view("classifications.remove_graph")(classification.remove_classification_graph), name='remove_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/instance-operation$'.format(integer),
record_view("classifications.update_graph")(classification.classification_instance_operation), name='classification_instance_operation'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/(?P<link_id>\d+)/autofill$'.format(integer),
record_view("classifications.autofill_graph")(classification.autofill_classification_graph), name='autofill_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/link$'.format(integer),
record_view("classifications.link_graph")(classification.link_classification_graph), name='link_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/stack/(?P<stack_id>{0})/linkroi/(?P<ci_id>{0})/$'.format(integer),
record_view("classifications.link_roi")(classification.link_roi_to_classification), name='link_roi_to_classification'),
url(r'^classification/(?P<workspace_pid>{0})/export$'.format(integer),
classification.export, name='export_classification'),
url(r'^classification/(?P<workspace_pid>{0})/export/excludetags/(?P<exclusion_tags>{1})/$'.format(integer, wordlist),
classification.export, name='export_classification'),
url(r'^classification/(?P<workspace_pid>{0})/search$'.format(integer),
classification.search, name='search_classifications'),
url(r'^classification/(?P<workspace_pid>{0})/export_ontology$'.format(integer),
classification.export_ontology, name='export_ontology'),
]
# Notifications
urlpatterns += [
url(r'^(?P<project_id>\d+)/notifications/list$', notifications.list_notifications),
url(r'^(?P<project_id>\d+)/changerequest/approve$', record_view("change_requests.approve")(notifications.approve_change_request)),
url(r'^(?P<project_id>\d+)/changerequest/reject$', record_view("change_requests.reject")(notifications.reject_change_request)),
]
# Regions of interest
urlpatterns += [
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/info$'.format(integer), roi.get_roi_info, name='get_roi_info'),
url(r'^(?P<project_id>{0})/roi/link/(?P<relation_id>{0})/stack/(?P<stack_id>{0})/ci/(?P<ci_id>{0})/$'.format(integer),
record_view("rois.create_link")(roi.link_roi_to_class_instance), name='link_roi_to_class_instance'),
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/remove$'.format(integer), record_view("rois.remove_link")(roi.remove_roi_link), name='remove_roi_link'),
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/image$'.format(integer), roi.get_roi_image, name='get_roi_image'),
url(r'^(?P<project_id>{0})/roi/add$'.format(integer), record_view("rois.create")(roi.add_roi), name='add_roi'),
]
# Clustering
urlpatterns += [
url(r'^clustering/(?P<workspace_pid>{0})/setup$'.format(integer),
record_view("clusterings.setup_env")(clustering.setup_clustering), name="clustering_setup"),
url(r'^clustering/(?P<workspace_pid>{0})/show$'.format(integer),
TemplateView.as_view(template_name="catmaid/clustering/display.html"),
name="clustering_display"),
]
# Volumes
urlpatterns += [
url(r'^(?P<project_id>\d+)/volumes/$', volume.volume_collection),
url(r'^(?P<project_id>\d+)/volumes/add$', record_view("volumes.create")(volume.add_volume)),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/$', volume.volume_detail),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/intersect$', volume.intersects),
]
# Front-end tests
urlpatterns += [
url(r'^tests$', login_required(CatmaidView.as_view(template_name="catmaid/tests.html")), name="frontend_tests"),
]
# Collection of various parts of the CATMAID API. These methods are usually
# one- or two-liners and having them in a separate statement would not improve
# readability. Therefore, they are all declared in this general statement.
urlpatterns += [
# User analytics and proficiency
url(r'^useranalytics$', useranalytics.plot_useranalytics),
url(r'^(?P<project_id>\d+)/userproficiency$', user_evaluation.evaluate_user),
url(r'^(?P<project_id>\d+)/graphexport/json$', graphexport.export_jsongraph),
# Graphs
url(r'^(?P<project_id>\d+)/skeletons/confidence-compartment-subgraph', graph2.skeleton_graph),
# Circles
url(r'^(?P<project_id>\d+)/graph/circlesofhell', circles.circles_of_hell),
url(r'^(?P<project_id>\d+)/graph/directedpaths', circles.find_directed_paths),
# Analytics
url(r'^(?P<project_id>\d+)/skeleton/analytics$', analytics.analyze_skeletons),
# Review
url(r'^(?P<project_id>\d+)/user/reviewer-whitelist$', review.reviewer_whitelist),
# Search
url(r'^(?P<project_id>\d+)/search$', search.search),
# Wiring diagram export
url(r'^(?P<project_id>\d+)/wiringdiagram/json$', wiringdiagram.export_wiring_diagram),
url(r'^(?P<project_id>\d+)/wiringdiagram/nx_json$', wiringdiagram.export_wiring_diagram_nx),
# Annotation graph export
url(r'^(?P<project_id>\d+)/annotationdiagram/nx_json$', object.convert_annotations_to_networkx),
# Treenode table
url(r'^(?P<project_id>\d+)/treenode/table/(?P<skid>\d+)/content$', treenodetable.treenode_table_content),
]
# Patterns for FlyTEM access
from catmaid.control.flytem import (project as flytemproject,
review as flytemreview, stack as flytemstack)
urlpatterns += [
url(r'^flytem/projects/$', flytemproject.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', flytemreview.reviewer_whitelist),
url(r'^flytem/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', flytem.stack.stack_info),
url(r'^flytem/(?P<project_id>.+)/stacks$', flytemstack.stacks),
]
# Patterns for DVID access
from catmaid.control.dvid import (project as dvidproject,
review as dvidreview, stack as dvidstack)
urlpatterns += [
url(r'^dvid/projects/$', dvidproject.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', dvidreview.reviewer_whitelist),
url(r'^dvid/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', dvidstack.stack_info),
url(r'^dvid/(?P<project_id>.+)/stacks$', dvidstack.stacks),
]
|
catsop/CATMAID
|
django/applications/catmaid/urls.py
|
Python
|
gpl-3.0
| 28,809
|
[
"NEURON"
] |
24e2c4ae4029bbcdf53f186df73907f9ddfd3d4b2a1668b2cd2751230d3728c6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the abstract base classes for battery-related classes.
Regardless of the kind of electrode, conversion or insertion, there are many
common definitions and properties, e.g., average voltage, capacity, etc. which
can be defined in a general way. The Abc for battery classes implements some of
these common definitions to allow sharing of common logic between them.
"""
__author__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 1, 2012"
__status__ = "Beta"
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Dict, Tuple
from monty.json import MSONable
from scipy.constants import N_A
from pymatgen.core import Composition, Element
from pymatgen.entries.computed_entries import ComputedEntry
@dataclass
class AbstractVoltagePair(MSONable):
"""
An Abstract Base Class for a Voltage Pair.
Attributes:
voltage : Voltage of voltage pair.
        mAh: Capacity in mAh.
mass_charge: Mass of charged pair.
mass_discharge: Mass of discharged pair.
vol_charge: Vol of charged pair.
vol_discharge: Vol of discharged pair.
frac_charge: Frac of working ion in charged pair.
frac_discharge: Frac of working ion in discharged pair.
working_ion_entry: Working ion as an entry.
framework : The compositions of one formula unit of the host material
"""
voltage: float
mAh: float
mass_charge: float
mass_discharge: float
vol_charge: float
vol_discharge: float
frac_charge: float
frac_discharge: float
working_ion_entry: ComputedEntry
_framework_formula: str # should be made into Composition whenever the as_dict and from dict are fixed
def __post_init__(self):
        # ensure the framework formula is stored as a reduced composition
self._framework_formula = self.framework.reduced_formula
@property
def working_ion(self) -> Element:
"""
working ion as pymatgen Element object
"""
return self.working_ion_entry.composition.elements[0]
@property
def framework(self) -> Composition:
"""
The composition object representing the framework
"""
return Composition(self._framework_formula)
@property
def x_charge(self) -> float:
"""
The number of working ions per formula unit of host in the charged state
"""
return self.frac_charge * self.framework.num_atoms / (1 - self.frac_charge)
@property
def x_discharge(self) -> float:
"""
The number of working ions per formula unit of host in the discharged state
"""
return self.frac_discharge * self.framework.num_atoms / (1 - self.frac_discharge)
@dataclass
class AbstractElectrode(Sequence, MSONable):
"""
An Abstract Base Class representing an Electrode. It is essentially a
sequence of VoltagePairs. Generally, subclasses only need to implement
three abstract properties: voltage_pairs, working_ion and
working_ion_entry.
The general concept is that all other battery properties such as capacity,
etc. are derived from voltage pairs.
One of the major challenges with representing battery materials is keeping
track of the normalization between different entries. For example, one
entry might be TiO2 with one unit cell whereas another is LiTi2O4 with two
    unit cells. When computing battery properties, a universal reference state
    must always be used, otherwise normalization errors arise (e.g.,
the energy of LiTi2O4 must be divided by two to be compared with TiO2).
For properties such as volume, mass, or mAh transferred within the voltage
pair, a universal convention is necessary. AbstractElectrode can query for
extrinsic properties of several different AbstractVoltagePairs belonging to
a single charge/discharge path and be confident that the normalization is
being carried out properly throughout, even if more AbstractVoltagePairs
are added later.
The universal normalization is defined by the reduced structural framework
of the entries, which is common along the entire charge/discharge path. For
example, LiTi2O4 has a reduced structural framework of TiO2. Another
example is Li9V6P16O58 which would have a reduced structural framework of
V3P8O29. Note that reduced structural frameworks need not be
    charge-balanced or physical (e.g. V3P8O29 is not charge-balanced); they are
    just a tool for normalization.
Example: for a LiTi2O4 -> TiO2 AbstractVoltagePair, extrinsic quantities
like mAh or cell volumes are given per TiO2 formula unit.
Developers implementing a new battery (other than the two general ones
already implemented) need to implement a VoltagePair and an Electrode.
Attributes:
voltage_pairs: Objects that represent each voltage step
working_ion: Representation of the working ion that only contains element type
working_ion_entry: Representation of the working_ion that contains the energy
framework: The compositions of one formula unit of the host material
"""
voltage_pairs: Tuple[AbstractVoltagePair]
working_ion_entry: ComputedEntry
_framework_formula: str # should be made into Composition whenever the as_dict and from dict are fixed
def __post_init__(self):
        # ensure the framework formula is stored as a reduced composition
self._framework_formula = self.framework.reduced_formula
def __getitem__(self, index):
return self.voltage_pairs[index]
def __contains__(self, obj):
return obj in self.voltage_pairs
def __iter__(self):
return self.voltage_pairs.__iter__()
def __len__(self):
return len(self.voltage_pairs)
@property
def working_ion(self):
"""
working ion as pymatgen Element object
"""
return self.working_ion_entry.composition.elements[0]
@property
def framework(self):
"""
The composition object representing the framework
"""
return Composition(self._framework_formula)
@property
def x_charge(self) -> float:
"""
The number of working ions per formula unit of host in the charged state
"""
return self.voltage_pairs[0].x_charge
@property
def x_discharge(self) -> float:
"""
The number of working ions per formula unit of host in the discharged state
"""
return self.voltage_pairs[-1].x_discharge
@property
def max_delta_volume(self):
"""
Maximum volume change along insertion
"""
vols = [v.vol_charge for v in self.voltage_pairs]
vols.extend([v.vol_discharge for v in self.voltage_pairs])
return max(vols) / min(vols) - 1
@property
def num_steps(self):
"""
        The number of distinct voltage steps from fully charged to discharged,
based on the stable intermediate states
"""
return len(self.voltage_pairs)
@property
def max_voltage(self):
"""
Highest voltage along insertion
"""
return max([p.voltage for p in self.voltage_pairs])
@property
def min_voltage(self):
"""
Lowest voltage along insertion
"""
return min([p.voltage for p in self.voltage_pairs])
@property
def max_voltage_step(self):
"""
Maximum absolute difference in adjacent voltage steps
"""
steps = [
self.voltage_pairs[i].voltage - self.voltage_pairs[i + 1].voltage
for i in range(len(self.voltage_pairs) - 1)
]
return max(steps) if len(steps) > 0 else 0
@property
def normalization_mass(self):
"""
Returns: Mass used for normalization. This is the mass of the discharged
electrode of the last voltage pair.
"""
return self.voltage_pairs[-1].mass_discharge
@property
def normalization_volume(self):
"""
Returns: Mass used for normalization. This is the vol of the discharged
electrode of the last voltage pair.
"""
return self.voltage_pairs[-1].vol_discharge
def get_sub_electrodes(self, adjacent_only=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
Must be implemented for each electrode object.
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set true
Returns:
A list of Electrode objects
"""
        raise NotImplementedError(
            "The get_sub_electrodes function must be implemented for each concrete electrode "
            f"class {self.__class__.__name__}"
        )
def get_average_voltage(self, min_voltage=None, max_voltage=None):
"""
Average voltage for path satisfying between a min and max voltage.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
given step.
Returns:
Average voltage in V across the insertion path (a subset of the
path can be chosen by the optional arguments)
"""
pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)
if len(pairs_in_range) == 0:
return 0
total_cap_in_range = sum([p.mAh for p in pairs_in_range])
total_edens_in_range = sum([p.mAh * p.voltage for p in pairs_in_range])
return total_edens_in_range / total_cap_in_range
def get_capacity_grav(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):
"""
Get the gravimetric capacity of the electrode.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
                given step.
            use_overall_normalization (bool): If False, normalize by the
                discharged state of only the voltage pairs matching the voltage
                criteria. If True, use default normalization of the full
electrode path.
Returns:
Gravimetric capacity in mAh/g across the insertion path (a subset
of the path can be chosen by the optional arguments).
"""
pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)
normalization_mass = (
self.normalization_mass
if use_overall_normalization or len(pairs_in_range) == 0
else pairs_in_range[-1].mass_discharge
)
return sum([pair.mAh for pair in pairs_in_range]) / normalization_mass
def get_capacity_vol(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):
"""
Get the volumetric capacity of the electrode.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
                given step.
            use_overall_normalization (bool): If False, normalize by the
                discharged state of only the voltage pairs matching the voltage
                criteria. If True, use default normalization of the full
electrode path.
Returns:
Volumetric capacity in mAh/cc across the insertion path (a subset
of the path can be chosen by the optional arguments)
"""
pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)
normalization_vol = (
self.normalization_volume
if use_overall_normalization or len(pairs_in_range) == 0
else pairs_in_range[-1].vol_discharge
)
return sum([pair.mAh for pair in pairs_in_range]) / normalization_vol * 1e24 / N_A
def get_specific_energy(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):
"""
        Returns the specific energy of the battery in Wh/kg.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
                given step.
            use_overall_normalization (bool): If False, normalize by the
                discharged state of only the voltage pairs matching the voltage
                criteria. If True, use default normalization of the full
electrode path.
Returns:
Specific energy in Wh/kg across the insertion path (a subset of
the path can be chosen by the optional arguments)
"""
return self.get_capacity_grav(min_voltage, max_voltage, use_overall_normalization) * self.get_average_voltage(
min_voltage, max_voltage
)
def get_energy_density(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):
"""
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
                given step.
            use_overall_normalization (bool): If False, normalize by the
                discharged state of only the voltage pairs matching the voltage
                criteria. If True, use default normalization of the full
electrode path.
Returns:
Energy density in Wh/L across the insertion path (a subset of the
path can be chosen by the optional arguments).
"""
return self.get_capacity_vol(min_voltage, max_voltage, use_overall_normalization) * self.get_average_voltage(
min_voltage, max_voltage
)
def _select_in_voltage_range(self, min_voltage=None, max_voltage=None):
"""
Selects VoltagePairs within a certain voltage range.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
            max_voltage (float): The maximum allowable voltage for a
given step.
Returns:
A list of VoltagePair objects
"""
min_voltage = min_voltage if min_voltage is not None else self.min_voltage
max_voltage = max_voltage if max_voltage is not None else self.max_voltage
return list(filter(lambda p: min_voltage <= p.voltage <= max_voltage, self.voltage_pairs))
def get_summary_dict(self, print_subelectrodes=True) -> Dict:
"""
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
            A summary of this electrode's properties in dict format.
"""
d = {
"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self.working_ion.symbol,
"nsteps": self.num_steps,
"fracA_charge": self.voltage_pairs[0].frac_charge,
"fracA_discharge": self.voltage_pairs[-1].frac_discharge,
"framework_formula": self._framework_formula,
}
if print_subelectrodes:
def f_dict(c):
return c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=True)))
d["all_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=False)))
return d
|
richardtran415/pymatgen
|
pymatgen/apps/battery/battery_abc.py
|
Python
|
mit
| 16,560
|
[
"pymatgen"
] |
d4e4f2d49ce618c73680c43de3fcb55ece20ee0893e4108617748637f9d3c5a7
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import matplotlib.cm as cm
import os
import matplotlib.gridspec as gridspec
###############################################################################################################
# TO LOAD
###############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# WHICH NEURONS
space = pd.read_hdf("../figures/figures_articles_v2/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
hd_index = space.index.values[space['hd'] == 1]
neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
burst.sort_values('sws').index.values[-20]]
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:20]
autocorr_rem = autocorr_rem[2:20]
autocorr_sws = autocorr_sws[2:20]
neurons = np.intersect1d(swr.dropna(1).columns.values, autocorr_sws.dropna(1).columns.values)
neurons = np.intersect1d(neurons, fr_index)
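# Feature matrices (one row per neuron): X holds the smoothed SWR
# cross-correlograms (-500 to +500 ms) and Y the z-scored autocorrelograms
# concatenated across wake, REM and SWS, before projecting both onto their
# first 10 principal components.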
X = np.copy(swr[neurons].values.T)
Y = np.copy(np.vstack((autocorr_wak[neurons].values,autocorr_rem[neurons].values, autocorr_sws[neurons].values))).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
pca_swr = PCA(n_components=10).fit(X)
pca_aut = PCA(n_components=10).fit(Y)
pc_swr = pca_swr.transform(X)
pc_aut = pca_aut.transform(Y)
m = 'Mouse17'
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
mappings = mappings.loc[neurons]
mappings = mappings[mappings.index.str.contains(m)]
neurons_ad = mappings.index[np.where(mappings['nucleus'] == 'AD')]
neurons_am = mappings.index[np.where(mappings['nucleus'] == 'AVd')]
groups_ad = mappings.loc[neurons_ad].groupby(by=['session','shank']).groups
groups_am = mappings.loc[neurons_am].groupby(by=['session','shank']).groups
pc_aut = pd.DataFrame(index = neurons, data = pc_aut)
pc_swr = pd.DataFrame(index = neurons, data = pc_swr)
info_ad = pd.DataFrame(index = list(groups_ad.keys()), columns=pd.MultiIndex.from_product((('aut', 'swr'), ('mean', 'var'))))
for k in groups_ad.keys():
if len(groups_ad[k]) > 3:
corr_aut = np.zeros((len(groups_ad[k]),len(groups_ad[k])))
corr_swr = np.zeros((len(groups_ad[k]),len(groups_ad[k])))
for i,n in enumerate(groups_ad[k]):
for j,m in enumerate(groups_ad[k]):
corr_aut[i,j] = scipy.stats.pearsonr(pc_aut.loc[n].values, pc_aut.loc[m].values)[0]
corr_swr[i,j] = scipy.stats.pearsonr(pc_swr.loc[n].values, pc_swr.loc[m].values)[0]
distance = np.vstack((corr_aut[np.triu_indices_from(corr_aut, 1)], corr_swr[np.triu_indices_from(corr_swr, 1)])).T
info_ad.loc[k,('aut','mean')] = np.mean(distance[:,0])
info_ad.loc[k,('swr','mean')] = np.mean(distance[:,1])
info_ad.loc[k,('aut','var')] = np.var(distance[:,0])
info_ad.loc[k,('swr','var')] = np.var(distance[:,1])
info_am = pd.DataFrame(index = list(groups_am.keys()), columns=pd.MultiIndex.from_product((('aut', 'swr'), ('mean', 'var'))))
for k in groups_am.keys():
if len(groups_am[k]) > 3:
corr_aut = np.zeros((len(groups_am[k]),len(groups_am[k])))
corr_swr = np.zeros((len(groups_am[k]),len(groups_am[k])))
for i,n in enumerate(groups_am[k]):
for j,m in enumerate(groups_am[k]):
corr_aut[i,j] = scipy.stats.pearsonr(pc_aut.loc[n].values, pc_aut.loc[m].values)[0]
corr_swr[i,j] = scipy.stats.pearsonr(pc_swr.loc[n].values, pc_swr.loc[m].values)[0]
distance = np.vstack((corr_aut[np.triu_indices_from(corr_aut, 1)], corr_swr[np.triu_indices_from(corr_swr, 1)])).T
info_am.loc[k,('aut','mean')] = np.mean(distance[:,0])
info_am.loc[k,('swr','mean')] = np.mean(distance[:,1])
info_am.loc[k,('aut','var')] = np.var(distance[:,0])
info_am.loc[k,('swr','var')] = np.var(distance[:,1])
info_ad = info_ad.dropna()
info_am = info_am.dropna()
info_ad = info_ad.sort_values(('swr', 'var'))
info_am = info_am.sort_values(('swr', 'var'))
neurons_ad = groups_ad[info_ad.index[0]]
# neurons_am = groups_am[(17,2)]
# neurons_am = groups_am[list(groups_am.keys())[8]] # good one
neurons_am = groups_am[list(groups_am.keys())[8]]
# neurons_am = groups_am[(11,3)]
# Sorting by channel position
# neurons_am = mappings.loc[neurons_am, 'channel'].sort_values().index.values
corr_aut_am = pd.DataFrame(index = neurons_am, columns = np.arange(len(neurons_am)-1))
corr_swr_am = pd.DataFrame(index = neurons_am, columns = np.arange(len(neurons_am)-1))
for n in neurons_am:
neurons_am2 = list(np.copy(neurons_am))
neurons_am2.remove(n)
for i, m in enumerate(neurons_am2):
corr_aut_am.loc[n,i] = scipy.stats.pearsonr(pc_aut.loc[m].values, pc_aut.loc[n].values)[0]
corr_swr_am.loc[n,i] = scipy.stats.pearsonr(pc_swr.loc[m].values, pc_swr.loc[n].values)[0]
corr_swr_aut = pd.Series(index = neurons_am)
for n in neurons_am:
corr_swr_aut[n] = scipy.stats.pearsonr(corr_aut_am.loc[n].values, corr_swr_am.loc[n].values)[0]
neuron_seed = corr_swr_aut.index[6]
neurons_am2 = list(np.copy(neurons_am))
neurons_am2.remove(neuron_seed)
neurons_am2 = np.array(neurons_am2)[corr_aut_am.loc[neuron_seed].sort_values().index.values]
# neuron_seed = neurons_am[0]
# neurons_am2 = neurons_am[1:]
corr_am2 = pd.Series(index = neurons_am2)
for n in neurons_am2:
corr_am2[n] = scipy.stats.pearsonr(pc_aut.loc[n].values, pc_aut.loc[neuron_seed].values)[0]
corr_am2 = corr_am2.sort_values()[::-1]
figure()
gs = gridspec.GridSpec(2,len(corr_am2))
for i,n in enumerate(corr_am2.index):
pair = [neuron_seed, n]
subplot(gs[0,i])
tmp = pd.concat([autocorr_wak[pair], autocorr_rem[pair], autocorr_sws[pair]])
plot(tmp.values)
title(i)
subplot(gs[1,i])
plot(swr[pair])
show()
neurons_to_plot = [corr_am2.index[i] for i in [0,4,8]]
figure()
gs = gridspec.GridSpec(2, 3)
for i, n in enumerate(neurons_to_plot):
pair = [neuron_seed, n]
subplot(gs[0,i])
tmp = pd.concat([autocorr_wak[pair], autocorr_rem[pair], autocorr_sws[pair]])
plot(tmp.values)
title(i)
subplot(gs[1,i])
plot(swr[pair])
show()
| gviejo/ThalamusPhysio | python/main_search_examples_fig3.py | Python | gpl-3.0 | 7,499 | ["Gaussian"] | ed85772963709dde390593345c099a1458c69e541f27133f58e0b805b583a6c4 |
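For context, the analysis above reduces to a short pipeline: Gaussian-smoothed autocorrelograms are z-scored, projected onto a few principal components, and neurons are compared through pairwise Pearson correlations of those components. The sketch below reproduces that pipeline on synthetic data; the neuron names, sizes, and random values are illustrative assumptions, since the HDF5 stores loaded above are not available here.

# Minimal sketch (synthetic data): Gaussian-smoothed autocorrelograms -> PCA ->
# pairwise Pearson correlation of the component loadings, mirroring the pipeline above.
import numpy as np
import pandas as pd
import scipy.stats
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
neurons = ['n%02d' % i for i in range(12)]            # hypothetical neuron names
lags = np.arange(0.5, 50.5, 0.5)                      # autocorrelogram lags (ms)
autocorr = pd.DataFrame(rng.rand(len(lags), len(neurons)),
                        index=lags, columns=neurons)

# Same smoothing call as above: Gaussian-weighted rolling mean along the lag axis.
autocorr = autocorr.rolling(window=20, win_type='gaussian',
                            center=True, min_periods=1).mean(std=3.0)

# z-score each neuron's profile, then reduce to a few principal components.
Y = autocorr.values.T
Y = (Y - Y.mean(1)[:, np.newaxis]) / Y.std(1)[:, np.newaxis]
pc = pd.DataFrame(index=neurons, data=PCA(n_components=5).fit_transform(Y))

# Pairwise Pearson correlation of PC loadings between neurons of one group.
corr = np.zeros((len(neurons), len(neurons)))
for i, n in enumerate(neurons):
    for j, m in enumerate(neurons):
        corr[i, j] = scipy.stats.pearsonr(pc.loc[n].values, pc.loc[m].values)[0]
print(corr[np.triu_indices_from(corr, 1)].mean())     # mean off-diagonal similarity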
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_, run_module_suite)
from scipy import signal
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
]
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_cheb_odd_high_attenuation(self):
cheb_odd = signal.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
cheb_even = signal.chebwin(54, at=-40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
cheb_odd = signal.chebwin(7, at=-10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
cheb_even = signal.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
w = signal.get_window(('chebwin', -40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(signal, window_name)
w1 = window(7, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1, w2)
# just check the below runs
window(6, *params, sym=True)
window(6, *params, sym=False)
if __name__ == "__main__":
run_module_suite()
| ogrisel/scipy | scipy/signal/tests/test_windows.py | Python | bsd-3-clause | 4,508 | ["Gaussian"] | fb776070b3defdec90386247cd3db0247eba0a22a31e60ac044b33ac1d5733a2 |
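The test file above exercises scipy.signal's window functions. The short sketch below shows how the same APIs are typically called; the window lengths and parameters are illustrative and are not the fixture values used in the tests.

# Brief usage sketch for the window APIs exercised by the tests above.
import numpy as np
from scipy import signal

# Dolph-Chebyshev window, specified by its sidelobe attenuation in dB.
w_cheb = signal.chebwin(51, at=100)

# get_window() accepts a name or a (name, param, ...) tuple; fftbins=False
# returns the symmetric (filter-design) variant instead of the periodic one.
w_hamming = signal.get_window('hamming', 64)
w_gauss = signal.get_window(('gaussian', 7), 51, fftbins=False)

print(w_cheb.max(), w_hamming.sum(), np.allclose(w_gauss, w_gauss[::-1]))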
"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to :math:`\\frac{n_samples - n_features - 1}{2}` outliers) estimator
of covariance. The idea is to find :math:`\\frac{n_samples + n_features + 1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aimed at compensating for the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set's location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided n_samples > 5 * n_features
- The mean and the empirical covariance of the observations that are known
  to be good ones. This can be considered a "perfect" MCD estimate, so one
  can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
# cast to integers so the outlier counts can be used as array sizes / indices
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| loli/sklearn-ensembletrees | examples/covariance/plot_robust_vs_empirical_covariance.py | Python | bsd-3-clause | 6,309 | ["Gaussian"] | 58df2d7a6962b2900c72e07894f52f437bb366cf8354fa697dbe3afd39094d9f |
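The core comparison made by the example above can be condensed to a few lines: fit MinCovDet and EmpiricalCovariance on contaminated Gaussian data and compare their error norms against the true (identity) covariance. The sample sizes and contamination level below are illustrative assumptions.

# Compact sketch of the comparison above: robust (MCD) vs empirical covariance
# on contaminated Gaussian data.
import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(42)
n_samples, n_features, n_outliers = 100, 5, 15
X = rng.randn(n_samples, n_features)                 # inliers ~ N(0, I)
X[:n_outliers] += 10.0 * (rng.randint(2, size=(n_outliers, n_features)) - 0.5)

mcd = MinCovDet().fit(X)
emp = EmpiricalCovariance().fit(X)

# error_norm() compares a fitted covariance with a reference matrix (here the
# true identity covariance); the MCD error should be much smaller.
print("MCD covariance error:      ", mcd.error_norm(np.eye(n_features)))
print("Empirical covariance error:", emp.error_norm(np.eye(n_features)))
print("MCD location error:        ", np.sum(mcd.location_ ** 2))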
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
class BMPReader:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads a series of BMP files.
Set the file pattern by making use of the file browsing dialog. Replace
the increasing index by a %d format specifier. %03d can be used for
example, in which case %d will be replaced by an integer zero padded to 3
digits, i.e. 000, 001, 002 etc. %d counts from the 'First slice' to the
'Last slice'.
"""
class DICOMReader:
kits = ['vtk_kit']
cats = ['Readers', 'Medical', 'DICOM']
help = """New module for reading DICOM data.
GDCM-based module for reading DICOM data. This is newer than
dicomRDR (which is DCMTK-based) and should be able to read more
kinds of data. The interface is deliberately less rich, as the
DICOMReader is supposed to be used in concert with the
DICOMBrowser.
If DICOMReader fails to read your DICOM data, please also try the
    dicomRDR, as its code is a few years more mature than that of
the more flexible but younger DICOMReader.
"""
class dicomRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Module for reading DICOM data.
This is older DCMTK-based DICOM reader class. It used to be the
default in DeVIDE before the advent of the GDCM-based DICOMReader
in 8.5.
Add DICOM files (they may be from multiple series) by using the 'Add'
button on the view/config window. You can select multiple files in
the File dialog by holding shift or control whilst clicking. You
can also drag and drop files from a file or DICOM browser either
onto an existing dicomRDR or directly onto the Graph Editor
canvas.
"""
class JPEGReader:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads a series of JPG (JPEG) files.
Set the file pattern by making use of the file browsing dialog. Replace
the increasing index by a %d format specifier. %03d can be used for
example, in which case %d will be replaced by an integer zero padded to 3
digits, i.e. 000, 001, 002 etc. %d counts from the 'First slice' to the
'Last slice'.
"""
class metaImageRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads MetaImage format files.
MetaImage files have an .mha or .mhd file extension. .mha files are
    single files containing header and data, whereas .mhd files are separate headers
that refer to a separate raw data file.
"""
class objRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for OBJ polydata format.
"""
class plyRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for the Polygon File Format (Stanford Triangle Format) polydata format.
"""
class pngRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads a series of PNG files.
Set the file pattern by making use of the file browsing dialog. Replace
the increasing index by a %d format specifier. %03d can be used for
example, in which case %d will be replaced by an integer zero padded to 3
digits, i.e. 000, 001, 002 etc. %d counts from the 'First slice' to the
'Last slice'.
"""
class points_reader:
# BUG: empty kits list screws up dependency checking
kits = ['vtk_kit']
cats = ['Writers']
help = """TBD
"""
class rawVolumeRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Use this module to read raw data volumes from disk.
"""
class stlRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for simple STL triangle-based polydata format.
"""
class TIFFReader:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads a series of TIFF files.
Set the file pattern by making use of the file browsing dialog. Replace
the increasing index by a %d format specifier. %03d can be used for
example, in which case %d will be replaced by an integer zero padded to 3
digits, i.e. 000, 001, 002 etc. %d counts from the 'First slice' to the
'Last slice'.
"""
class vtiRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for VTK XML Image Data, the preferred format for all
VTK-compatible image data storage.
"""
class vtkPolyDataRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for legacy VTK polydata.
"""
class vtkStructPtsRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reader for legacy VTK structured points (image) data.
"""
class vtpRDR:
kits = ['vtk_kit']
cats = ['Readers']
help = """Reads VTK PolyData in the VTK XML format.
VTP is the preferred format for DeVIDE PolyData.
"""
| nagyistoce/devide | modules/readers/module_index.py | Python | bsd-3-clause | 4,768 | ["VTK"] | 55324d2c811a126ef52bb62d4105fc0733d450f512f1b29aefa5254c2d74f3a7 |
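Several of the reader help strings above describe a printf-style file pattern in which %d (or a zero-padded variant such as %03d) is replaced by the slice index, counting from 'First slice' to 'Last slice'. The sketch below shows how such a pattern expands; the path and slice range are hypothetical.

# Small sketch of the %d / %03d file-pattern expansion described in the reader
# help strings above. The file names are hypothetical.
first_slice, last_slice = 0, 4
pattern = "/data/scan_%03d.bmp"          # %03d -> index zero-padded to 3 digits

filenames = [pattern % i for i in range(first_slice, last_slice + 1)]
print(filenames)
# ['/data/scan_000.bmp', '/data/scan_001.bmp', '/data/scan_002.bmp',
#  '/data/scan_003.bmp', '/data/scan_004.bmp']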
#!/usr/bin/env python
import sys, os, re, shutil, commands, scipy.stats
from pyrna.parsers import to_fasta
from pyrna.features import DNA
from pyrna.computations import Tool, Blast
from pyrna import utils
from pymongo import MongoClient
from pandas import DataFrame
from Bio import AlignIO
from bson import ObjectId
"""
COMMAND-LINE
------------
You can run the program with this command-line:
./search_orthologs.py
"""
class MyBlast(Blast):
def __init__(self, target_molecules, cache_dir):# cache_dir="/tmp" in the original class Blast
Blast.__init__(self, target_molecules, cache_dir)
class Hmmer(Tool):
def __init__(self, seqdb, cache_dir="/tmp"):
Tool.__init__(self, cache_dir = cache_dir)
self.seqdb = seqdb
def hmmbuild(self, multiple_alignment, query_name):
"""
        Constructs a profile HMM from a multiple sequence alignment (DNA or protein).
        The input alignment is read in CLUSTAL W format.
"""
path = self.cache_dir+"/"+utils.generate_random_name(7)
os.mkdir(path)
with open("%s/input.aln"%path, 'w+b') as clustal_file:
clustal_file.write(multiple_alignment)
self.hmm_profile = path+"/profile.hmm"
commands.getoutput("cd %s ; hmmbuild -n %s %s %s"%(self.cache_dir, query_name, self.hmm_profile, clustal_file.name))
def hmmsearch(self):
"""
hmmsearch searches profile(s) against a sequence database.
hmmsearch [options] <hmmfile> <seqdb>
<seqdb> BLAST database
"""
return self.parse_hmmsearch_output(commands.getoutput("cd %s ; hmmsearch %s %s"%(self.cache_dir, self.hmm_profile, self.seqdb)))
def parse_hmmsearch_output(self, output):
"""
Parses the hmmsearch output.
Parameters:
---------
- output: the hmmsearch output content as a String
Returns:
--------
A pandas DataFrame describing all the hmmsearch hits. The index stores hit ids. The columns are:
- query
- target
- e_value
- score (bit score)
"""
# print output
hits=[]
query_name = None
target_name = None
evalue = None
tag = False
lines = output.split('\n')
i = 0
while i < len(lines):
line = lines[i].strip()
if line:
if line.startswith("Query:"):
query_name = line.split()[1]
elif line.startswith("E-value") and not tag:
tag = True
elif not line.startswith("-") and not line.startswith("Domain") and not line.startswith("[No") and tag:
tokens = line.split()
hits.append({
"query": query_name,
"e_value": float(tokens[0]),
"score": float(tokens[1]),
"target": tokens[8]
})
elif line.startswith("[No"):
hits.append({
"query": query_name,
"e_value": '',
"score": '',
"target": ''
})
break
elif (line.startswith("-") or line.startswith("Domain")) and hits:
break
i += 1
return DataFrame(hits)
class Muscle(Tool):
def __init__(self, fasta_sequences, cache_dir="/tmp"):
Tool.__init__(self, cache_dir = cache_dir)
path = cache_dir+"/"+utils.generate_random_name(7)
os.mkdir(path)
with open("%s/input.fasta"%path, 'w+b') as fasta_file:
fasta_file.write(fasta_sequences)
self.fasta_sequences = fasta_file.name
def align(self, output_format=None):
"""
Aligns multiple sequences using the MUSCLE algorithm.
"""
format = ''#FASTA format
if output_format == "clustalw":
format = ' -clwstrict'
return self.remove_header(commands.getoutput("cd %s ; muscle -in %s%s"%(self.cache_dir, self.fasta_sequences, format)))#IF HEADER
def realign(self, existing_msa, new_seqs=None, output_format=None):
"""
Aligns a multiple sequence alignment with new sequence(s) using the MUSCLE algorithm.
Command line: muscle -profile -in1 existing_aln.afa -in2 new_seqs.afa
        The output alignment will be in FASTA (by default) or CLUSTAL W format; the CLUSTAL W output contains a CONSENSUS line, whereas the FASTA output does not.
"""
tmp_dir = os.path.dirname(self.fasta_sequences)
with open("%s/existing_msa.fasta"%tmp_dir, 'w+b') as existing_msa_file:
existing_msa_file.write(existing_msa)
format = ''#FASTA format
if output_format == "clustalw":
format = ' -clwstrict '
if new_seqs:
with open("%s/new_seqs.fasta"%tmp_dir, 'w+b') as new_seqs_msa_file:
new_seqs_msa_file.write(new_seqs)
return self.remove_header(commands.getoutput("cd %s ; muscle -profile%s -in1 %s -in2 %s"%(self.cache_dir, format, existing_msa_file.name, new_seqs_msa_file.name)))
else:
return self.remove_header(commands.getoutput("cd %s ; muscle -profile%s -in1 %s -in2 %s"%(self.cache_dir, format, existing_msa_file.name, self.fasta_sequences)))
def get_spscore(self):
spscore_output = commands.getoutput("cd %s ; muscle -spscore %s"%(self.cache_dir, self.fasta_sequences))
spscore = None
lines = spscore_output.split('\n')
for line in lines:
if line.startswith('File='):#'File=alignment.fsa;SP=18.97'
spscore = float(line.split('SP=')[1].strip())
return spscore
def remove_header(self, msa_output):
"""
Removes the header of a Muscle output.
"""
msa_wo_header = []
tag = False
lines = msa_output.split('\n')
for line in lines:
if line.startswith(">") or line.startswith("CLUSTAL W (1.81) multiple sequence alignment") or line.startswith("MUSCLE (3.8) multiple sequence alignment"):
tag = True
if tag:
msa_wo_header.append(line)
return '\n'.join(msa_wo_header)
def convert_msa_format(input_msa, input_format, output_format):
"""
Function that converts the format of a multiple sequence alignment (MSA).
"""
path = "/tmp/"+utils.generate_random_name(7)
os.mkdir(path)
with open("%s/msa.aln"%path, 'w+b') as msa_file:
msa_file.write(input_msa)
input_infh = open("%s/msa.aln"%path, 'rU')
output_outfh = open("%s/msa.fasta"%path, 'w')
alignment = AlignIO.parse(input_infh, input_format)
AlignIO.write(alignment, output_outfh, output_format)
output_outfh.close()
input_infh.close()
with open("%s/msa.fasta"%path, 'r') as output_infh:
output_msa = output_infh.read()
return output_msa
def purge_dir(directory, option=None):
"""
    Removes, in a given directory, the sub-directories created by the script search_orthologs.py
Parameters:
---------
- directory
- option: None or "blastdb"
With the option "blastdb", we remove sub-directories created by the command 'formatdb' (formats a database that will be used by the tool BLASTP).
"""
directories = os.listdir(directory)
tag = False
for d in directories:
if os.path.isdir(directory+'/'+d): # we test if d is a directory
if re.match('^\w+$', d) and os.access(directory+'/'+d, os.R_OK):
files = os.listdir(directory+'/'+d)
for f in files:
if option == 'blastdb':
if f.endswith('.pin'): # file for a database used by BLASTP
tag = True
else:
if not d.endswith('_db'):
if f.endswith('.hmm') or f.endswith('.fasta') or f.endswith('.aln'):# file for a database used by the functions Hmmer(), Muscle() and convert_msa_format()
tag = True
if tag:
shutil.rmtree(directory+'/'+d)
tag = False
def hmm(hmm_threshold):
"""
Arguments:
---------
    -hmm_threshold: integer (default: 100, i.e. only the best hit); hits are kept if their bit score is >= hmm_threshold percent of the best hmmsearch bit score
Description:
-----------
    This function aligns orthologs of the Candida and/or S. cerevisiae species and searches for orthologs in the Nakaseomyces species.
    From each gene of C. glabrata and its Candida and/or S. cerevisiae orthologs, it creates a multiple protein sequence alignment and builds an HMM profile that is used to find orthologs in the Nakaseomyces species with the tool "hmmsearch".
Step 1: alignment with the Muscle algorithm of the protein sequences of a C. glabrata gene and its Candida and/or S. cerevisiae orthologous genes
Step 2: building formatted BLAST databases from protein sequences of the Nakaseomyces species.
    Step 3: for each multiple sequence alignment of Candida proteins, building an HMM profile, then searching Nakaseomyces orthologs using the tool "hmmsearch".
    The bit score is used to retrieve the orthologs whose score is >= n percent of the best score (see the argument -hmm_threshold).
    Step 4: if Nakaseomyces orthologs exist, realignment with Muscle, i.e. alignment of the Nakaseomyces sequences with their Candida and/or S. cerevisiae orthologous sequences;
otherwise, the original alignment is stored in the MongoDB
Step 5: updating data in the Mongo databases of the Candida and Nakaseomyces species:
table 'annotations'
field 'alignment': list of alignment IDs ; for C. glabrata, this list contains a unique ID
field 'orthologs_in_candida_species': list of 'annotations_id@database_name'
Step 6: creation of a Mongo database named 'comparative_genomics':
table 'proteins'
field '_id': proteins ID = alignment ID
field 'locus_tag': name of the C. glabrata locus used to do the original alignment
field 'alignment': multiple sequence alignments = dictionary with keys 'all_species' AND 'non_pathogenic_species' AND/OR 'pathogenic_species'
field 'spscore': SP scores of the MSA = dictionary with keys 'all_species' AND 'non_pathogenic_species' AND/OR 'pathogenic_species'
field 'percentile_of_spscore': percentiles of SP scores = dictionary with keys 'all_species' AND 'non_pathogenic_species' AND/OR 'pathogenic_species'
Comments:
Species in CGD = ['Candida_glabrata_CBS_138', 'Candida_albicans_SC5314', 'Candida_dubliniensis_CD36' and 'Candida_parapsilosis_CDC317']
Pathogenic species = ['Candida_glabrata_CBS_138', 'Candida_albicans_SC5314', 'Candida_dubliniensis_CD36', 'Candida_parapsilosis_CDC317', 'Nakaseomyces_bracarensis_CBS_10154', 'Nakaseomyces_nivariensis_CBS_9983']
"""
non_pathogens = ['Nakaseomyces_bacillisporus_CBS_7720', 'Nakaseomyces_castellii_CBS_4332', 'Nakaseomyces_delphensis_CBS_2170']#and Saccharomyces cerevisiae S288C
client = MongoClient()
all_spscores = []
non_patho_spscores = []
patho_spscores = []
### CREATION OF FORMATTED BLAST DATABASES FROM PROTEINS OF THE NAKASEOMYCES SPECIES ###
nakaseo_dbs = ['Nakaseomyces_bracarensis_CBS_10154', 'Nakaseomyces_castellii_CBS_4332', 'Nakaseomyces_nivariensis_CBS_9983', 'Nakaseomyces_delphensis_CBS_2170', 'Nakaseomyces_bacillisporus_CBS_7720']
nakaseo_dict = {}
for species in nakaseo_dbs:
nakaseo_molecules = []
for annotation in client[species]['annotations'].find({'translation':{'$exists':True}}, no_cursor_timeout = True):#tRNA, ncRNA and rRNA have not translated sequence
dna = DNA(name=annotation['locus_tag'], sequence=annotation['translation'])
dna.id = annotation['_id']
nakaseo_molecules.append(dna)
blast = Blast(target_molecules=nakaseo_molecules, cache_dir="/tmp/%s_db"%species)
blast.format_db(is_nucleotide=False)
nakaseo_dict[species] = [blast.formatted_db, nakaseo_molecules]
total_loci = client['Candida_glabrata_CBS_138']['annotations'].find({'alignment':{'$exists':False}}).count()
print "Total number of genes processed: %i"%total_loci
loci_counter = 0
### FROM EACH C. GLABRATA LOCUS THAT HAS NOT ALIGNMENT IN THE MONGODB ###
for glabrata_annotation in client['Candida_glabrata_CBS_138']['annotations'].find({'alignment':{'$exists':False}}, no_cursor_timeout = True):#find({'locus_tag':{'$in':["CAGL0M05665g", "CAGL0G06006g"]}}):
loci_counter += 1
locus_tag = glabrata_annotation['locus_tag']
print "Locus number %i named %s in progress (%.2f %% of total loci)"%(loci_counter, locus_tag, (loci_counter/float(total_loci))*100)
aligned_orthologs = []
cgd_molecules_to_align = []
nakaseo_molecules_to_align = []
non_patho_mol_to_align = []
patho_mol_to_align = []
fasta_alignment = None
non_pathogen_alignment = None
pathogen_alignment = None
spscore = None
spscore_for_non_pathogen = None
spscore_for_pathogen = None
if glabrata_annotation.has_key('orthologs_in_candida_species'):
for ortholog_in_candida in glabrata_annotation['orthologs_in_candida_species']:
aligned_orthologs.append(ortholog_in_candida)
ortholog_annotation = client[ortholog_in_candida.split('@')[1]]['annotations'].find_one({'_id': ortholog_in_candida.split('@')[0]})
molecule_to_align = DNA(name=ortholog_annotation['locus_tag'], sequence=ortholog_annotation['translation'])# we get Candida orthologous sequence
cgd_molecules_to_align.append(molecule_to_align)
patho_mol_to_align.append(molecule_to_align)#all CGD Candida species are pathogenic
if glabrata_annotation.has_key('sace_ortholog'):
sace_molecule_to_align = DNA(name=glabrata_annotation['sace_ortholog'][0], sequence=glabrata_annotation['sace_ortholog'][1])# we get S. cerevisiae orthologous sequence
cgd_molecules_to_align.append(sace_molecule_to_align)
non_patho_mol_to_align.append(sace_molecule_to_align)
### ALIGNMENT OF THE ORTHOLOGOUS PROTEIN SEQUENCES OF CANDIDA SPECIES & CEREVISIAE ###
if cgd_molecules_to_align:
aligned_orthologs.append("%s@Candida_glabrata_CBS_138"%glabrata_annotation['_id'])
cagl_molecule_to_align = DNA(name=locus_tag, sequence=glabrata_annotation['translation'])# we get C. glabrata sequence
cgd_molecules_to_align.append(cagl_molecule_to_align)
patho_mol_to_align.append(cagl_molecule_to_align)
muscle = Muscle(fasta_sequences=to_fasta(molecules=cgd_molecules_to_align))
fasta_alignment = muscle.align()
### FOR EACH PROTEIN ALIGNMENT, SEACHING OF ORTHOLOGS IN THE NAKASEOMYCES SPECIES AND REALIGNMENT ###
for species in nakaseo_dbs:
hmmer = Hmmer(seqdb=nakaseo_dict[species][0]) # seqdb = BLAST protein database
hmmer.hmmbuild(fasta_alignment, locus_tag)
df = hmmer.hmmsearch()
if not df.empty:
i = 0
best_score = None
for row in df.iterrows():
i += 1
data_hmmer = row[1]
if data_hmmer['target']:
if i == 1:
best_score = data_hmmer['score']
percent = 100
elif i > 1:
percent = (data_hmmer['score'] / best_score) * 100
if percent >= hmm_threshold:
for nakaseo_molecule in nakaseo_dict[species][1]:
if nakaseo_molecule.name == data_hmmer['target']:
if nakaseo_molecule.sequence.endswith('*'):
nakaseo_molecule.sequence = nakaseo_molecule.sequence[:-1]
nakaseo_molecules_to_align.append(nakaseo_molecule)
if species in non_pathogens:
non_patho_mol_to_align.append(nakaseo_molecule)
else:
patho_mol_to_align.append(nakaseo_molecule)
aligned_orthologs.append("%s@%s"%(nakaseo_molecule.id, species))
break
### REALIGNMENT WITH ALL ORTHOLOGOUS SEQUENCES ###
if nakaseo_molecules_to_align:
muscle = Muscle(fasta_sequences=to_fasta(molecules=nakaseo_molecules_to_align))
if len(nakaseo_molecules_to_align) == 1:
fasta_alignment = muscle.realign(existing_msa=fasta_alignment)
else:
input_msa = muscle.align()
fasta_alignment = muscle.realign(existing_msa=fasta_alignment, new_seqs=input_msa)
### SP SCORE CALCULATION FROM THE MSA ###
muscle2 = Muscle(fasta_sequences=fasta_alignment)# MSA with or without Nakaseo
spscore = muscle2.get_spscore()
all_spscores.append(spscore)
### ALIGNMENT BETWEEN SEQUENCES OF NON-PATHOGENIC OR PATHOGENIC SPECIES ###
if non_patho_mol_to_align:
muscle = Muscle(fasta_sequences=to_fasta(molecules=non_patho_mol_to_align))
non_pathogen_alignment = muscle.align()
muscle2 = Muscle(fasta_sequences=non_pathogen_alignment)
spscore_for_non_pathogen = muscle2.get_spscore()
non_patho_spscores.append(spscore_for_non_pathogen)
if patho_mol_to_align:
muscle = Muscle(fasta_sequences=to_fasta(molecules=patho_mol_to_align))
pathogen_alignment = muscle.align()
muscle2 = Muscle(fasta_sequences=pathogen_alignment)
spscore_for_pathogen = muscle2.get_spscore()
patho_spscores.append(spscore_for_pathogen)
### UPDATING THE MONGO DATABASES ###
if aligned_orthologs:
alignment_id = str(ObjectId())
for aligned_ortholog in aligned_orthologs:
aligned_orthologs_copy = aligned_orthologs[:]
aligned_orthologs_copy.remove(aligned_ortholog) # we remove the current ortholog in the list
annotation = client[aligned_ortholog.split('@')[1]]['annotations'].find_one({'_id': aligned_ortholog.split('@')[0]})
aln_ids = annotation.get('alignment')
if not aln_ids:
client[aligned_ortholog.split('@')[1]]['annotations'].update({'_id': annotation['_id']},{'$set':{'alignment': [alignment_id], 'orthologs_in_candida_species': aligned_orthologs_copy}}, False)
else:
### A sequence that is orthologous to several genes of C. glabrata will be in several multiple alignments ### Example: CAAL locus "C4_02340W_A" ; CAGL loci "CAGL0M05665g" AND "CAGL0G06006g"
aln_ids.append(alignment_id)
candida_orthologs = annotation.get('orthologs_in_candida_species')
for ortho in aligned_orthologs_copy:
if ortho not in candida_orthologs:
candida_orthologs.append(ortho)
### UPDATING THE CANDIDA AND NAKASEO MONGO DATABASES ###
client[aligned_ortholog.split('@')[1]]['annotations'].update({'_id': annotation['_id']},{'$set':{'alignment': aln_ids, 'orthologs_in_candida_species': candida_orthologs}}, False)
### INSERT A DOCUMENT IN THE COMPARATIVE GENOMICS MONGO DATABASE ###
proteins_document = {
'_id': alignment_id,
'locus_tag': locus_tag,
'alignment': {'all_species': fasta_alignment},# if we need a msa at the format clustalw: convert_msa_format(fasta_alignment, "fasta", "clustal")
'spscore': {'all_species': spscore}
}
if spscore_for_non_pathogen.__class__ is float:# we eliminate None but conserve 0.0 ; if spscore is not None, msa also
proteins_document['alignment']['non_pathogenic_species'] = non_pathogen_alignment
proteins_document['spscore']['non_pathogenic_species'] = spscore_for_non_pathogen
if spscore_for_pathogen.__class__ is float:
proteins_document['alignment']['pathogenic_species'] = pathogen_alignment
proteins_document['spscore']['pathogenic_species'] = spscore_for_pathogen
client['comparative_genomics']['proteins'].insert(proteins_document)
purge_dir(directory="/tmp")
purge_dir(directory="/tmp", option="blastdb")
### WE CALCULATE THE PERCENTILE OF SP SCORE FOR EACH ALIGNMENT ###
for annotation in client['comparative_genomics']['proteins'].find({'spscore':{'$exists':True}}, no_cursor_timeout = True):
percentile = {'all_species': scipy.stats.percentileofscore(all_spscores, annotation['spscore']['all_species'])}
if annotation['spscore'].has_key('non_pathogenic_species'):
percentile['non_pathogenic_species'] = scipy.stats.percentileofscore(all_spscores, annotation['spscore']['non_pathogenic_species'])
if annotation['spscore'].has_key('pathogenic_species'):
percentile['pathogenic_species'] = scipy.stats.percentileofscore(all_spscores, annotation['spscore']['pathogenic_species'])
client['comparative_genomics']['proteins'].update({'_id': annotation['_id']}, {'$set':{'percentile_of_spscore': percentile}}, False)
client.close()
if __name__ == '__main__':
hmm_threshold = 100
if "-threshold" in sys.argv:
hmm_threshold = int(sys.argv[sys.argv.index("-threshold")+1])
hmm(hmm_threshold=hmm_threshold)
| JossinetLab/CAAGLE | scripts/search_orthologs.py | Python | mit | 22,430 | ["BLAST"] | 3e0b1a0a5232463d6a5974ee9d412a9ed5da4440030aa597073166fd421c4dbd |
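Two post-processing steps from the script above are easy to isolate: keeping hmmsearch hits whose bit score is at least hmm_threshold percent of the best score, and expressing an SP score as a percentile of all SP scores with scipy.stats.percentileofscore. The hit table and scores below are made-up illustrative values, not output from the real databases.

# Sketch of two post-processing steps from the script above: bit-score
# thresholding of hmmsearch hits and percentile ranking of SP scores.
import scipy.stats
from pandas import DataFrame

hmm_threshold = 90   # keep hits scoring >= 90% of the best bit score
hits = DataFrame([
    {"query": "CAGL0A00105g", "target": "NABA0A001", "e_value": 1e-50, "score": 180.0},
    {"query": "CAGL0A00105g", "target": "NABA0B002", "e_value": 1e-45, "score": 170.0},
    {"query": "CAGL0A00105g", "target": "NABA0C003", "e_value": 1e-10, "score": 60.0},
])

best_score = hits["score"].max()
kept = hits[hits["score"] >= best_score * hmm_threshold / 100.0]
print(kept["target"].tolist())            # ['NABA0A001', 'NABA0B002']

# Percentile of one SP score within the distribution of all SP scores,
# as done for the 'percentile_of_spscore' field above.
all_spscores = [12.1, 15.4, 18.97, 21.3, 25.0]
print(scipy.stats.percentileofscore(all_spscores, 18.97))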